@dianshuv/copilot-api 0.2.0 → 0.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/main.mjs +396 -363
  2. package/package.json +2 -2
package/dist/main.mjs CHANGED
@@ -1017,7 +1017,7 @@ const patchClaude = defineCommand({
1017
1017
 
1018
1018
  //#endregion
1019
1019
  //#region package.json
1020
- var version = "0.2.0";
1020
+ var version = "0.2.2";
1021
1021
 
1022
1022
  //#endregion
1023
1023
  //#region src/lib/adaptive-rate-limiter.ts
@@ -1406,7 +1406,7 @@ function recordResponse(id, response, durationMs) {
1406
1406
  }
1407
1407
  }
1408
1408
  function getHistory(options = {}) {
1409
- const { page = 1, limit = 50, model, endpoint, success, from, to, search, sessionId } = options;
1409
+ const { page = 1, limit = 50, model, endpoint, status, from, to, search, sessionId } = options;
1410
1410
  let filtered = [...historyState.entries];
1411
1411
  if (sessionId) filtered = filtered.filter((e) => e.sessionId === sessionId);
1412
1412
  if (model) {
@@ -1414,7 +1414,21 @@ function getHistory(options = {}) {
1414
1414
  filtered = filtered.filter((e) => e.request.model.toLowerCase().includes(modelLower) || e.response?.model.toLowerCase().includes(modelLower));
1415
1415
  }
1416
1416
  if (endpoint) filtered = filtered.filter((e) => e.endpoint === endpoint);
1417
- if (success !== void 0) filtered = filtered.filter((e) => e.response?.success === success);
1417
+ let effectiveStatus = status;
1418
+ const legacySuccess = options.success;
1419
+ if (!effectiveStatus && legacySuccess !== void 0) effectiveStatus = legacySuccess ? "success" : "error";
1420
+ switch (effectiveStatus) {
1421
+ case "success":
1422
+ filtered = filtered.filter((e) => e.response?.success === true);
1423
+ break;
1424
+ case "error":
1425
+ filtered = filtered.filter((e) => e.response !== void 0 && !e.response.success);
1426
+ break;
1427
+ case "pending":
1428
+ filtered = filtered.filter((e) => !e.response);
1429
+ break;
1430
+ default: break;
1431
+ }
1418
1432
  if (from) filtered = filtered.filter((e) => e.timestamp >= from);
1419
1433
  if (to) filtered = filtered.filter((e) => e.timestamp <= to);
1420
1434
  if (search) {
@@ -3278,6 +3292,11 @@ function handleGetEntries(c) {
3278
3292
  model: query.model || void 0,
3279
3293
  endpoint: query.endpoint,
3280
3294
  success: query.success ? query.success === "true" : void 0,
3295
+ status: [
3296
+ "success",
3297
+ "error",
3298
+ "pending"
3299
+ ].includes(query.status) ? query.status : void 0,
3281
3300
  from: query.from ? Number.parseInt(query.from, 10) : void 0,
3282
3301
  to: query.to ? Number.parseInt(query.to, 10) : void 0,
3283
3302
  search: query.search || void 0,
@@ -3554,11 +3573,11 @@ async function loadEntries() {
3554
3573
  if (currentSessionId) params.set('sessionId', currentSessionId);
3555
3574
 
3556
3575
  const endpoint = document.getElementById('filter-endpoint').value;
3557
- const success = document.getElementById('filter-success').value;
3576
+ const status = document.getElementById('filter-status').value;
3558
3577
  const search = document.getElementById('filter-search').value;
3559
3578
 
3560
3579
  if (endpoint) params.set('endpoint', endpoint);
3561
- if (success) params.set('success', success);
3580
+ if (status) params.set('status', status);
3562
3581
  if (search) params.set('search', search);
3563
3582
 
3564
3583
  try {
@@ -4311,10 +4330,11 @@ const template = `
4311
4330
  <option value="anthropic">Anthropic</option>
4312
4331
  <option value="openai">OpenAI</option>
4313
4332
  </select>
4314
- <select id="filter-success" onchange="loadEntries()">
4333
+ <select id="filter-status" onchange="loadEntries()">
4315
4334
  <option value="">All Status</option>
4316
- <option value="true">Success</option>
4317
- <option value="false">Failed</option>
4335
+ <option value="success">Success</option>
4336
+ <option value="error">Failed</option>
4337
+ <option value="pending">Pending</option>
4318
4338
  </select>
4319
4339
  </div>
4320
4340
 
@@ -4361,6 +4381,7 @@ function getHistoryUI() {
4361
4381
  <meta charset="UTF-8">
4362
4382
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
4363
4383
  <title>Copilot API - Request History</title>
4384
+ <link rel="icon" href="data:,">
4364
4385
  <style>${styles}</style>
4365
4386
  </head>
4366
4387
  <body>
@@ -4907,6 +4928,211 @@ async function checkNeedsCompactionAnthropic(payload, model, config = {}) {
4907
4928
  };
4908
4929
  }
4909
4930
 
4931
+ //#endregion
4932
+ //#region src/services/copilot/create-anthropic-messages.ts
4933
+ /**
4934
+ * Direct Anthropic-style message API for Copilot.
4935
+ * Used when the model vendor is Anthropic and supports /v1/messages endpoint.
4936
+ */
4937
+ /**
4938
+ * Fields that are supported by Copilot's Anthropic API endpoint.
4939
+ * Any other fields in the incoming request will be stripped.
4940
+ */
4941
+ const COPILOT_SUPPORTED_FIELDS = new Set([
4942
+ "model",
4943
+ "messages",
4944
+ "max_tokens",
4945
+ "system",
4946
+ "metadata",
4947
+ "stop_sequences",
4948
+ "stream",
4949
+ "temperature",
4950
+ "top_p",
4951
+ "top_k",
4952
+ "tools",
4953
+ "tool_choice",
4954
+ "thinking",
4955
+ "service_tier"
4956
+ ]);
4957
+ /**
4958
+ * Filter payload to only include fields supported by Copilot's Anthropic API.
4959
+ * This prevents errors like "Extra inputs are not permitted" for unsupported
4960
+ * fields like `output_config`.
4961
+ *
4962
+ * Also converts server-side tools (web_search, etc.) to custom tools.
4963
+ */
4964
+ function filterPayloadForCopilot(payload) {
4965
+ const filtered = {};
4966
+ const unsupportedFields = [];
4967
+ for (const [key, value] of Object.entries(payload)) if (COPILOT_SUPPORTED_FIELDS.has(key)) filtered[key] = value;
4968
+ else unsupportedFields.push(key);
4969
+ if (unsupportedFields.length > 0) consola.debug(`[DirectAnthropic] Filtered unsupported fields: ${unsupportedFields.join(", ")}`);
4970
+ if (filtered.tools) filtered.tools = convertServerToolsToCustom(filtered.tools);
4971
+ return filtered;
4972
+ }
4973
+ /**
4974
+ * Adjust max_tokens if thinking is enabled.
4975
+ * According to Anthropic docs, max_tokens must be greater than thinking.budget_tokens.
4976
+ * max_tokens = thinking_budget + response_tokens
4977
+ */
4978
+ function adjustMaxTokensForThinking(payload) {
4979
+ const thinking = payload.thinking;
4980
+ if (!thinking) return payload;
4981
+ const budgetTokens = thinking.budget_tokens;
4982
+ if (!budgetTokens) return payload;
4983
+ if (payload.max_tokens <= budgetTokens) {
4984
+ const newMaxTokens = budgetTokens + Math.min(16384, budgetTokens);
4985
+ consola.debug(`[DirectAnthropic] Adjusted max_tokens: ${payload.max_tokens} → ${newMaxTokens} (thinking.budget_tokens=${budgetTokens})`);
4986
+ return {
4987
+ ...payload,
4988
+ max_tokens: newMaxTokens
4989
+ };
4990
+ }
4991
+ return payload;
4992
+ }
4993
+ /**
4994
+ * Create messages using Anthropic-style API directly.
4995
+ * This bypasses the OpenAI translation layer for Anthropic models.
4996
+ */
4997
+ async function createAnthropicMessages(payload, options) {
4998
+ if (!state.copilotToken) throw new Error("Copilot token not found");
4999
+ let filteredPayload = filterPayloadForCopilot(payload);
5000
+ filteredPayload = adjustMaxTokensForThinking(filteredPayload);
5001
+ const enableVision = filteredPayload.messages.some((msg) => {
5002
+ if (typeof msg.content === "string") return false;
5003
+ return msg.content.some((block) => block.type === "image");
5004
+ });
5005
+ const isAgentCall = filteredPayload.messages.some((msg) => msg.role === "assistant");
5006
+ const headers = {
5007
+ ...copilotHeaders(state, enableVision),
5008
+ "X-Initiator": options?.initiator ?? (isAgentCall ? "agent" : "user"),
5009
+ "anthropic-version": "2023-06-01"
5010
+ };
5011
+ consola.debug("Sending direct Anthropic request to Copilot /v1/messages");
5012
+ const response = await fetch(`${copilotBaseUrl(state)}/v1/messages`, {
5013
+ method: "POST",
5014
+ headers,
5015
+ body: JSON.stringify(filteredPayload)
5016
+ });
5017
+ if (!response.ok) {
5018
+ consola.debug("Request failed:", {
5019
+ model: filteredPayload.model,
5020
+ max_tokens: filteredPayload.max_tokens,
5021
+ stream: filteredPayload.stream,
5022
+ tools: filteredPayload.tools?.map((t) => ({
5023
+ name: t.name,
5024
+ type: t.type
5025
+ })),
5026
+ thinking: filteredPayload.thinking,
5027
+ messageCount: filteredPayload.messages.length
5028
+ });
5029
+ throw await HTTPError.fromResponse("Failed to create Anthropic messages", response, filteredPayload.model);
5030
+ }
5031
+ if (payload.stream) return events(response);
5032
+ return await response.json();
5033
+ }
5034
+ const SERVER_TOOL_CONFIGS = {
5035
+ web_search: {
5036
+ description: "Search the web for current information. Returns web search results that can help answer questions about recent events, current data, or information that may have changed since your knowledge cutoff.",
5037
+ input_schema: {
5038
+ type: "object",
5039
+ properties: { query: {
5040
+ type: "string",
5041
+ description: "The search query"
5042
+ } },
5043
+ required: ["query"]
5044
+ }
5045
+ },
5046
+ web_fetch: {
5047
+ description: "Fetch content from a URL. NOTE: This is a client-side tool - the client must fetch the URL and return the content.",
5048
+ input_schema: {
5049
+ type: "object",
5050
+ properties: { url: {
5051
+ type: "string",
5052
+ description: "The URL to fetch"
5053
+ } },
5054
+ required: ["url"]
5055
+ }
5056
+ },
5057
+ code_execution: {
5058
+ description: "Execute code in a sandbox. NOTE: This is a client-side tool - the client must execute the code.",
5059
+ input_schema: {
5060
+ type: "object",
5061
+ properties: {
5062
+ code: {
5063
+ type: "string",
5064
+ description: "The code to execute"
5065
+ },
5066
+ language: {
5067
+ type: "string",
5068
+ description: "The programming language"
5069
+ }
5070
+ },
5071
+ required: ["code"]
5072
+ }
5073
+ },
5074
+ computer: {
5075
+ description: "Control computer desktop. NOTE: This is a client-side tool - the client must handle computer control.",
5076
+ input_schema: {
5077
+ type: "object",
5078
+ properties: { action: {
5079
+ type: "string",
5080
+ description: "The action to perform"
5081
+ } },
5082
+ required: ["action"]
5083
+ }
5084
+ }
5085
+ };
5086
+ /**
5087
+ * Check if a tool is a server-side tool that needs conversion.
5088
+ */
5089
+ function getServerToolPrefix(tool) {
5090
+ if (tool.type) {
5091
+ for (const prefix of Object.keys(SERVER_TOOL_CONFIGS)) if (tool.type.startsWith(prefix)) return prefix;
5092
+ }
5093
+ return null;
5094
+ }
5095
+ /**
5096
+ * Convert server-side tools to custom tools, or pass them through unchanged.
5097
+ * This allows them to be passed to the API and handled by the client.
5098
+ *
5099
+ * Note: Server-side tools are only converted if state.rewriteAnthropicTools is enabled.
5100
+ */
5101
+ function convertServerToolsToCustom(tools) {
5102
+ if (!tools) return;
5103
+ const result = [];
5104
+ for (const tool of tools) {
5105
+ const serverToolPrefix = getServerToolPrefix(tool);
5106
+ if (serverToolPrefix) {
5107
+ const config = SERVER_TOOL_CONFIGS[serverToolPrefix];
5108
+ if (!state.rewriteAnthropicTools) {
5109
+ consola.debug(`[DirectAnthropic] Passing ${serverToolPrefix} through unchanged (use --rewrite-anthropic-tools to convert)`);
5110
+ result.push(tool);
5111
+ continue;
5112
+ }
5113
+ if (config.remove) {
5114
+ consola.warn(`[DirectAnthropic] Removing unsupported server tool: ${tool.name}. Reason: ${config.removalReason}`);
5115
+ continue;
5116
+ }
5117
+ consola.debug(`[DirectAnthropic] Converting server tool to custom: ${tool.name} (type: ${tool.type})`);
5118
+ result.push({
5119
+ name: tool.name,
5120
+ description: config.description,
5121
+ input_schema: config.input_schema
5122
+ });
5123
+ } else result.push(tool);
5124
+ }
5125
+ return result.length > 0 ? result : void 0;
5126
+ }
5127
+ /**
5128
+ * Check if a model supports direct Anthropic API.
5129
+ * Returns true if redirect is disabled (direct API is on) and the model is from Anthropic vendor.
5130
+ */
5131
+ function supportsDirectAnthropicApi(modelId) {
5132
+ if (state.redirectAnthropic) return false;
5133
+ return (state.models?.data.find((m) => m.id === modelId))?.vendor === "Anthropic";
5134
+ }
5135
+
4910
5136
  //#endregion
4911
5137
  //#region src/routes/messages/message-utils.ts
4912
5138
  function convertAnthropicMessages(messages) {
@@ -4976,51 +5202,106 @@ function mapOpenAIStopReasonToAnthropic(finishReason) {
4976
5202
  }
4977
5203
 
4978
5204
  //#endregion
4979
- //#region src/routes/messages/non-stream-translation.ts
4980
- const OPENAI_TOOL_NAME_LIMIT = 64;
4981
- /**
4982
- * Ensure all tool_use blocks have corresponding tool_result responses.
4983
- * This handles edge cases where conversation history may be incomplete:
4984
- * - Session interruptions where tool execution was cut off
4985
- * - Previous request failures
4986
- * - Client sending truncated history
4987
- *
4988
- * Adding placeholder responses prevents API errors and maintains protocol compliance.
4989
- */
4990
- function fixMessageSequence(messages) {
4991
- const fixedMessages = [];
4992
- for (let i = 0; i < messages.length; i++) {
4993
- const message = messages[i];
4994
- fixedMessages.push(message);
4995
- if (message.role === "assistant" && message.tool_calls && message.tool_calls.length > 0) {
4996
- const foundToolResponses = /* @__PURE__ */ new Set();
4997
- let j = i + 1;
4998
- while (j < messages.length && messages[j].role === "tool") {
4999
- const toolMessage = messages[j];
5000
- if (toolMessage.tool_call_id) foundToolResponses.add(toolMessage.tool_call_id);
5001
- j++;
5002
- }
5003
- for (const toolCall of message.tool_calls) if (!foundToolResponses.has(toolCall.id)) {
5004
- consola.debug(`Adding placeholder tool_result for ${toolCall.id}`);
5005
- fixedMessages.push({
5006
- role: "tool",
5007
- tool_call_id: toolCall.id,
5008
- content: "Tool execution was interrupted or failed."
5009
- });
5010
- }
5011
- }
5012
- }
5013
- return fixedMessages;
5014
- }
5015
- function translateToOpenAI(payload) {
5016
- const toolNameMapping = {
5017
- truncatedToOriginal: /* @__PURE__ */ new Map(),
5018
- originalToTruncated: /* @__PURE__ */ new Map()
5019
- };
5020
- const messages = translateAnthropicMessagesToOpenAI(payload.messages, payload.system, toolNameMapping);
5205
+ //#region src/routes/messages/stream-accumulator.ts
5206
+ function createAnthropicStreamAccumulator() {
5021
5207
  return {
5022
- payload: {
5023
- model: translateModelName(payload.model),
5208
+ model: "",
5209
+ inputTokens: 0,
5210
+ outputTokens: 0,
5211
+ stopReason: "",
5212
+ content: "",
5213
+ toolCalls: [],
5214
+ currentToolCall: null
5215
+ };
5216
+ }
5217
+ function processAnthropicEvent(event, acc) {
5218
+ switch (event.type) {
5219
+ case "content_block_delta":
5220
+ handleContentBlockDelta(event.delta, acc);
5221
+ break;
5222
+ case "content_block_start":
5223
+ handleContentBlockStart(event.content_block, acc);
5224
+ break;
5225
+ case "content_block_stop":
5226
+ handleContentBlockStop(acc);
5227
+ break;
5228
+ case "message_delta":
5229
+ handleMessageDelta(event.delta, event.usage, acc);
5230
+ break;
5231
+ default: break;
5232
+ }
5233
+ }
5234
+ function handleContentBlockDelta(delta, acc) {
5235
+ if (delta.type === "text_delta") acc.content += delta.text;
5236
+ else if (delta.type === "input_json_delta" && acc.currentToolCall) acc.currentToolCall.input += delta.partial_json;
5237
+ }
5238
+ function handleContentBlockStart(block, acc) {
5239
+ if (block.type === "tool_use") acc.currentToolCall = {
5240
+ id: block.id,
5241
+ name: block.name,
5242
+ input: ""
5243
+ };
5244
+ }
5245
+ function handleContentBlockStop(acc) {
5246
+ if (acc.currentToolCall) {
5247
+ acc.toolCalls.push(acc.currentToolCall);
5248
+ acc.currentToolCall = null;
5249
+ }
5250
+ }
5251
+ function handleMessageDelta(delta, usage, acc) {
5252
+ if (delta.stop_reason) acc.stopReason = delta.stop_reason;
5253
+ if (usage) {
5254
+ acc.inputTokens = usage.input_tokens ?? 0;
5255
+ acc.outputTokens = usage.output_tokens;
5256
+ }
5257
+ }
5258
+
5259
+ //#endregion
5260
+ //#region src/routes/messages/non-stream-translation.ts
5261
+ const OPENAI_TOOL_NAME_LIMIT = 64;
5262
+ /**
5263
+ * Ensure all tool_use blocks have corresponding tool_result responses.
5264
+ * This handles edge cases where conversation history may be incomplete:
5265
+ * - Session interruptions where tool execution was cut off
5266
+ * - Previous request failures
5267
+ * - Client sending truncated history
5268
+ *
5269
+ * Adding placeholder responses prevents API errors and maintains protocol compliance.
5270
+ */
5271
+ function fixMessageSequence(messages) {
5272
+ const fixedMessages = [];
5273
+ for (let i = 0; i < messages.length; i++) {
5274
+ const message = messages[i];
5275
+ fixedMessages.push(message);
5276
+ if (message.role === "assistant" && message.tool_calls && message.tool_calls.length > 0) {
5277
+ const foundToolResponses = /* @__PURE__ */ new Set();
5278
+ let j = i + 1;
5279
+ while (j < messages.length && messages[j].role === "tool") {
5280
+ const toolMessage = messages[j];
5281
+ if (toolMessage.tool_call_id) foundToolResponses.add(toolMessage.tool_call_id);
5282
+ j++;
5283
+ }
5284
+ for (const toolCall of message.tool_calls) if (!foundToolResponses.has(toolCall.id)) {
5285
+ consola.debug(`Adding placeholder tool_result for ${toolCall.id}`);
5286
+ fixedMessages.push({
5287
+ role: "tool",
5288
+ tool_call_id: toolCall.id,
5289
+ content: "Tool execution was interrupted or failed."
5290
+ });
5291
+ }
5292
+ }
5293
+ }
5294
+ return fixedMessages;
5295
+ }
5296
+ function translateToOpenAI(payload) {
5297
+ const toolNameMapping = {
5298
+ truncatedToOriginal: /* @__PURE__ */ new Map(),
5299
+ originalToTruncated: /* @__PURE__ */ new Map()
5300
+ };
5301
+ const messages = translateAnthropicMessagesToOpenAI(payload.messages, payload.system, toolNameMapping);
5302
+ return {
5303
+ payload: {
5304
+ model: translateModelName(payload.model),
5024
5305
  messages: fixMessageSequence(messages),
5025
5306
  max_tokens: payload.max_tokens,
5026
5307
  stop: payload.stop_sequences,
@@ -5073,7 +5354,8 @@ function translateModelName(model) {
5073
5354
  }
5074
5355
  if (/^claude-sonnet-4-5-\d+$/.test(model)) return "claude-sonnet-4.5";
5075
5356
  if (/^claude-sonnet-4-\d+$/.test(model)) return "claude-sonnet-4";
5076
- if (/^claude-opus-4-6$/.test(model)) return findLatestModel("claude-opus-4.6", "claude-opus-4.6");
5357
+ if (model === "claude-opus-4-6-1m") return "claude-opus-4.6-1m";
5358
+ if (/^claude-opus-4-6$/.test(model)) return "claude-opus-4.6";
5077
5359
  if (/^claude-opus-4-5-\d+$/.test(model)) return "claude-opus-4.5";
5078
5360
  if (/^claude-opus-4-\d+$/.test(model)) return findLatestModel("claude-opus", "claude-opus-4.5");
5079
5361
  if (/^claude-haiku-4-5-\d+$/.test(model)) return "claude-haiku-4.5";
@@ -5304,316 +5586,6 @@ function getAnthropicToolUseBlocks(toolCalls, toolNameMapping) {
5304
5586
  });
5305
5587
  }
5306
5588
 
5307
- //#endregion
5308
- //#region src/routes/messages/count-tokens-handler.ts
5309
- /**
5310
- * Handles token counting for Anthropic messages.
5311
- *
5312
- * For Anthropic models (vendor === "Anthropic"), uses the official Anthropic tokenizer.
5313
- * For other models, uses GPT tokenizers with appropriate buffers.
5314
- *
5315
- * When auto-truncate is enabled and the request would exceed limits,
5316
- * returns an inflated token count to trigger Claude Code's auto-compact mechanism.
5317
- */
5318
- async function handleCountTokens(c) {
5319
- try {
5320
- const anthropicBeta = c.req.header("anthropic-beta");
5321
- const anthropicPayload = await c.req.json();
5322
- const { payload: openAIPayload } = translateToOpenAI(anthropicPayload);
5323
- const selectedModel = state.models?.data.find((model) => model.id === openAIPayload.model);
5324
- if (!selectedModel) {
5325
- consola.warn("Model not found, returning default token count");
5326
- return c.json({ input_tokens: 1 });
5327
- }
5328
- if (state.autoTruncate) {
5329
- const truncateCheck = await checkNeedsCompactionAnthropic(anthropicPayload, selectedModel);
5330
- if (truncateCheck.needed) {
5331
- const contextWindow = selectedModel.capabilities?.limits?.max_context_window_tokens ?? 2e5;
5332
- const inflatedTokens = Math.floor(contextWindow * .95);
5333
- consola.debug(`[count_tokens] Would trigger auto-truncate: ${truncateCheck.currentTokens} tokens > ${truncateCheck.tokenLimit}, returning inflated count: ${inflatedTokens}`);
5334
- return c.json({ input_tokens: inflatedTokens });
5335
- }
5336
- }
5337
- const tokenizerName = selectedModel.capabilities?.tokenizer ?? "o200k_base";
5338
- const tokenCount = await getTokenCount(openAIPayload, selectedModel);
5339
- if (anthropicPayload.tools && anthropicPayload.tools.length > 0) {
5340
- let mcpToolExist = false;
5341
- if (anthropicBeta?.startsWith("claude-code")) mcpToolExist = anthropicPayload.tools.some((tool) => tool.name.startsWith("mcp__"));
5342
- if (!mcpToolExist) {
5343
- if (anthropicPayload.model.startsWith("claude")) tokenCount.input = tokenCount.input + 346;
5344
- else if (anthropicPayload.model.startsWith("grok")) tokenCount.input = tokenCount.input + 480;
5345
- }
5346
- }
5347
- let finalTokenCount = tokenCount.input + tokenCount.output;
5348
- if (!(selectedModel.vendor === "Anthropic")) finalTokenCount = anthropicPayload.model.startsWith("grok") ? Math.round(finalTokenCount * 1.03) : Math.round(finalTokenCount * 1.05);
5349
- consola.debug(`Token count: ${finalTokenCount} (tokenizer: ${tokenizerName})`);
5350
- return c.json({ input_tokens: finalTokenCount });
5351
- } catch (error) {
5352
- consola.error("Error counting tokens:", error);
5353
- return c.json({ input_tokens: 1 });
5354
- }
5355
- }
5356
-
5357
- //#endregion
5358
- //#region src/services/copilot/create-anthropic-messages.ts
5359
- /**
5360
- * Direct Anthropic-style message API for Copilot.
5361
- * Used when the model vendor is Anthropic and supports /v1/messages endpoint.
5362
- */
5363
- /**
5364
- * Fields that are supported by Copilot's Anthropic API endpoint.
5365
- * Any other fields in the incoming request will be stripped.
5366
- */
5367
- const COPILOT_SUPPORTED_FIELDS = new Set([
5368
- "model",
5369
- "messages",
5370
- "max_tokens",
5371
- "system",
5372
- "metadata",
5373
- "stop_sequences",
5374
- "stream",
5375
- "temperature",
5376
- "top_p",
5377
- "top_k",
5378
- "tools",
5379
- "tool_choice",
5380
- "thinking",
5381
- "service_tier"
5382
- ]);
5383
- /**
5384
- * Filter payload to only include fields supported by Copilot's Anthropic API.
5385
- * This prevents errors like "Extra inputs are not permitted" for unsupported
5386
- * fields like `output_config`.
5387
- *
5388
- * Also converts server-side tools (web_search, etc.) to custom tools.
5389
- */
5390
- function filterPayloadForCopilot(payload) {
5391
- const filtered = {};
5392
- const unsupportedFields = [];
5393
- for (const [key, value] of Object.entries(payload)) if (COPILOT_SUPPORTED_FIELDS.has(key)) filtered[key] = value;
5394
- else unsupportedFields.push(key);
5395
- if (unsupportedFields.length > 0) consola.debug(`[DirectAnthropic] Filtered unsupported fields: ${unsupportedFields.join(", ")}`);
5396
- if (filtered.tools) filtered.tools = convertServerToolsToCustom(filtered.tools);
5397
- return filtered;
5398
- }
5399
- /**
5400
- * Adjust max_tokens if thinking is enabled.
5401
- * According to Anthropic docs, max_tokens must be greater than thinking.budget_tokens.
5402
- * max_tokens = thinking_budget + response_tokens
5403
- */
5404
- function adjustMaxTokensForThinking(payload) {
5405
- const thinking = payload.thinking;
5406
- if (!thinking) return payload;
5407
- const budgetTokens = thinking.budget_tokens;
5408
- if (!budgetTokens) return payload;
5409
- if (payload.max_tokens <= budgetTokens) {
5410
- const newMaxTokens = budgetTokens + Math.min(16384, budgetTokens);
5411
- consola.debug(`[DirectAnthropic] Adjusted max_tokens: ${payload.max_tokens} → ${newMaxTokens} (thinking.budget_tokens=${budgetTokens})`);
5412
- return {
5413
- ...payload,
5414
- max_tokens: newMaxTokens
5415
- };
5416
- }
5417
- return payload;
5418
- }
5419
- /**
5420
- * Create messages using Anthropic-style API directly.
5421
- * This bypasses the OpenAI translation layer for Anthropic models.
5422
- */
5423
- async function createAnthropicMessages(payload, options) {
5424
- if (!state.copilotToken) throw new Error("Copilot token not found");
5425
- let filteredPayload = filterPayloadForCopilot(payload);
5426
- filteredPayload = adjustMaxTokensForThinking(filteredPayload);
5427
- const enableVision = filteredPayload.messages.some((msg) => {
5428
- if (typeof msg.content === "string") return false;
5429
- return msg.content.some((block) => block.type === "image");
5430
- });
5431
- const isAgentCall = filteredPayload.messages.some((msg) => msg.role === "assistant");
5432
- const headers = {
5433
- ...copilotHeaders(state, enableVision),
5434
- "X-Initiator": options?.initiator ?? (isAgentCall ? "agent" : "user"),
5435
- "anthropic-version": "2023-06-01"
5436
- };
5437
- consola.debug("Sending direct Anthropic request to Copilot /v1/messages");
5438
- const response = await fetch(`${copilotBaseUrl(state)}/v1/messages`, {
5439
- method: "POST",
5440
- headers,
5441
- body: JSON.stringify(filteredPayload)
5442
- });
5443
- if (!response.ok) {
5444
- consola.debug("Request failed:", {
5445
- model: filteredPayload.model,
5446
- max_tokens: filteredPayload.max_tokens,
5447
- stream: filteredPayload.stream,
5448
- tools: filteredPayload.tools?.map((t) => ({
5449
- name: t.name,
5450
- type: t.type
5451
- })),
5452
- thinking: filteredPayload.thinking,
5453
- messageCount: filteredPayload.messages.length
5454
- });
5455
- throw await HTTPError.fromResponse("Failed to create Anthropic messages", response, filteredPayload.model);
5456
- }
5457
- if (payload.stream) return events(response);
5458
- return await response.json();
5459
- }
5460
- const SERVER_TOOL_CONFIGS = {
5461
- web_search: {
5462
- description: "Search the web for current information. Returns web search results that can help answer questions about recent events, current data, or information that may have changed since your knowledge cutoff.",
5463
- input_schema: {
5464
- type: "object",
5465
- properties: { query: {
5466
- type: "string",
5467
- description: "The search query"
5468
- } },
5469
- required: ["query"]
5470
- }
5471
- },
5472
- web_fetch: {
5473
- description: "Fetch content from a URL. NOTE: This is a client-side tool - the client must fetch the URL and return the content.",
5474
- input_schema: {
5475
- type: "object",
5476
- properties: { url: {
5477
- type: "string",
5478
- description: "The URL to fetch"
5479
- } },
5480
- required: ["url"]
5481
- }
5482
- },
5483
- code_execution: {
5484
- description: "Execute code in a sandbox. NOTE: This is a client-side tool - the client must execute the code.",
5485
- input_schema: {
5486
- type: "object",
5487
- properties: {
5488
- code: {
5489
- type: "string",
5490
- description: "The code to execute"
5491
- },
5492
- language: {
5493
- type: "string",
5494
- description: "The programming language"
5495
- }
5496
- },
5497
- required: ["code"]
5498
- }
5499
- },
5500
- computer: {
5501
- description: "Control computer desktop. NOTE: This is a client-side tool - the client must handle computer control.",
5502
- input_schema: {
5503
- type: "object",
5504
- properties: { action: {
5505
- type: "string",
5506
- description: "The action to perform"
5507
- } },
5508
- required: ["action"]
5509
- }
5510
- }
5511
- };
5512
- /**
5513
- * Check if a tool is a server-side tool that needs conversion.
5514
- */
5515
- function getServerToolPrefix(tool) {
5516
- if (tool.type) {
5517
- for (const prefix of Object.keys(SERVER_TOOL_CONFIGS)) if (tool.type.startsWith(prefix)) return prefix;
5518
- }
5519
- return null;
5520
- }
5521
- /**
5522
- * Convert server-side tools to custom tools, or pass them through unchanged.
5523
- * This allows them to be passed to the API and handled by the client.
5524
- *
5525
- * Note: Server-side tools are only converted if state.rewriteAnthropicTools is enabled.
5526
- */
5527
- function convertServerToolsToCustom(tools) {
5528
- if (!tools) return;
5529
- const result = [];
5530
- for (const tool of tools) {
5531
- const serverToolPrefix = getServerToolPrefix(tool);
5532
- if (serverToolPrefix) {
5533
- const config = SERVER_TOOL_CONFIGS[serverToolPrefix];
5534
- if (!state.rewriteAnthropicTools) {
5535
- consola.debug(`[DirectAnthropic] Passing ${serverToolPrefix} through unchanged (use --rewrite-anthropic-tools to convert)`);
5536
- result.push(tool);
5537
- continue;
5538
- }
5539
- if (config.remove) {
5540
- consola.warn(`[DirectAnthropic] Removing unsupported server tool: ${tool.name}. Reason: ${config.removalReason}`);
5541
- continue;
5542
- }
5543
- consola.debug(`[DirectAnthropic] Converting server tool to custom: ${tool.name} (type: ${tool.type})`);
5544
- result.push({
5545
- name: tool.name,
5546
- description: config.description,
5547
- input_schema: config.input_schema
5548
- });
5549
- } else result.push(tool);
5550
- }
5551
- return result.length > 0 ? result : void 0;
5552
- }
5553
- /**
5554
- * Check if a model supports direct Anthropic API.
5555
- * Returns true if redirect is disabled (direct API is on) and the model is from Anthropic vendor.
5556
- */
5557
- function supportsDirectAnthropicApi(modelId) {
5558
- if (state.redirectAnthropic) return false;
5559
- return (state.models?.data.find((m) => m.id === modelId))?.vendor === "Anthropic";
5560
- }
5561
-
5562
- //#endregion
5563
- //#region src/routes/messages/stream-accumulator.ts
5564
- function createAnthropicStreamAccumulator() {
5565
- return {
5566
- model: "",
5567
- inputTokens: 0,
5568
- outputTokens: 0,
5569
- stopReason: "",
5570
- content: "",
5571
- toolCalls: [],
5572
- currentToolCall: null
5573
- };
5574
- }
5575
- function processAnthropicEvent(event, acc) {
5576
- switch (event.type) {
5577
- case "content_block_delta":
5578
- handleContentBlockDelta(event.delta, acc);
5579
- break;
5580
- case "content_block_start":
5581
- handleContentBlockStart(event.content_block, acc);
5582
- break;
5583
- case "content_block_stop":
5584
- handleContentBlockStop(acc);
5585
- break;
5586
- case "message_delta":
5587
- handleMessageDelta(event.delta, event.usage, acc);
5588
- break;
5589
- default: break;
5590
- }
5591
- }
5592
- function handleContentBlockDelta(delta, acc) {
5593
- if (delta.type === "text_delta") acc.content += delta.text;
5594
- else if (delta.type === "input_json_delta" && acc.currentToolCall) acc.currentToolCall.input += delta.partial_json;
5595
- }
5596
- function handleContentBlockStart(block, acc) {
5597
- if (block.type === "tool_use") acc.currentToolCall = {
5598
- id: block.id,
5599
- name: block.name,
5600
- input: ""
5601
- };
5602
- }
5603
- function handleContentBlockStop(acc) {
5604
- if (acc.currentToolCall) {
5605
- acc.toolCalls.push(acc.currentToolCall);
5606
- acc.currentToolCall = null;
5607
- }
5608
- }
5609
- function handleMessageDelta(delta, usage, acc) {
5610
- if (delta.stop_reason) acc.stopReason = delta.stop_reason;
5611
- if (usage) {
5612
- acc.inputTokens = usage.input_tokens ?? 0;
5613
- acc.outputTokens = usage.output_tokens;
5614
- }
5615
- }
5616
-
5617
5589
  //#endregion
5618
5590
  //#region src/routes/messages/stream-translation.ts
5619
5591
  function isToolBlockOpen(state) {
@@ -6212,9 +6184,19 @@ function recordStreamingResponse(acc, fallbackModel, ctx) {
6212
6184
 
6213
6185
  //#endregion
6214
6186
  //#region src/routes/messages/handler.ts
6187
+ function resolveModelFromBetaHeader(model, betaHeader) {
6188
+ if (!betaHeader || !/\bcontext-1m\b/.test(betaHeader)) return model;
6189
+ if (!model.startsWith("claude-")) return model;
6190
+ if (model.endsWith("-1m")) return model;
6191
+ const resolved = `${model}-1m`;
6192
+ consola.debug(`Detected context-1m in anthropic-beta header, resolving model: ${model} → ${resolved}`);
6193
+ return resolved;
6194
+ }
6215
6195
  async function handleCompletion(c) {
6216
6196
  const anthropicPayload = await c.req.json();
6217
6197
  consola.debug("Anthropic request payload:", JSON.stringify(anthropicPayload));
6198
+ const betaHeader = c.req.header("anthropic-beta");
6199
+ anthropicPayload.model = resolveModelFromBetaHeader(anthropicPayload.model, betaHeader);
6218
6200
  logToolInfo(anthropicPayload);
6219
6201
  const subagentMarker = parseSubagentMarkerFromFirstUser(anthropicPayload);
6220
6202
  const initiatorOverride = subagentMarker ? "agent" : void 0;
@@ -6259,6 +6241,57 @@ function logToolInfo(anthropicPayload) {
6259
6241
  }
6260
6242
  }
6261
6243
 
6244
+ //#endregion
6245
+ //#region src/routes/messages/count-tokens-handler.ts
6246
+ /**
6247
+ * Handles token counting for Anthropic messages.
6248
+ *
6249
+ * For Anthropic models (vendor === "Anthropic"), uses the official Anthropic tokenizer.
6250
+ * For other models, uses GPT tokenizers with appropriate buffers.
6251
+ *
6252
+ * When auto-truncate is enabled and the request would exceed limits,
6253
+ * returns an inflated token count to trigger Claude Code's auto-compact mechanism.
6254
+ */
6255
+ async function handleCountTokens(c) {
6256
+ try {
6257
+ const anthropicBeta = c.req.header("anthropic-beta");
6258
+ const anthropicPayload = await c.req.json();
6259
+ anthropicPayload.model = resolveModelFromBetaHeader(anthropicPayload.model, anthropicBeta);
6260
+ const { payload: openAIPayload } = translateToOpenAI(anthropicPayload);
6261
+ const selectedModel = state.models?.data.find((model) => model.id === openAIPayload.model);
6262
+ if (!selectedModel) {
6263
+ consola.warn("Model not found, returning default token count");
6264
+ return c.json({ input_tokens: 1 });
6265
+ }
6266
+ if (state.autoTruncate) {
6267
+ const truncateCheck = await checkNeedsCompactionAnthropic(anthropicPayload, selectedModel);
6268
+ if (truncateCheck.needed) {
6269
+ const contextWindow = selectedModel.capabilities?.limits?.max_context_window_tokens ?? 2e5;
6270
+ const inflatedTokens = Math.floor(contextWindow * .95);
6271
+ consola.debug(`[count_tokens] Would trigger auto-truncate: ${truncateCheck.currentTokens} tokens > ${truncateCheck.tokenLimit}, returning inflated count: ${inflatedTokens}`);
6272
+ return c.json({ input_tokens: inflatedTokens });
6273
+ }
6274
+ }
6275
+ const tokenizerName = selectedModel.capabilities?.tokenizer ?? "o200k_base";
6276
+ const tokenCount = await getTokenCount(openAIPayload, selectedModel);
6277
+ if (anthropicPayload.tools && anthropicPayload.tools.length > 0) {
6278
+ let mcpToolExist = false;
6279
+ if (anthropicBeta?.startsWith("claude-code")) mcpToolExist = anthropicPayload.tools.some((tool) => tool.name.startsWith("mcp__"));
6280
+ if (!mcpToolExist) {
6281
+ if (anthropicPayload.model.startsWith("claude")) tokenCount.input = tokenCount.input + 346;
6282
+ else if (anthropicPayload.model.startsWith("grok")) tokenCount.input = tokenCount.input + 480;
6283
+ }
6284
+ }
6285
+ let finalTokenCount = tokenCount.input + tokenCount.output;
6286
+ if (!(selectedModel.vendor === "Anthropic")) finalTokenCount = anthropicPayload.model.startsWith("grok") ? Math.round(finalTokenCount * 1.03) : Math.round(finalTokenCount * 1.05);
6287
+ consola.debug(`Token count: ${finalTokenCount} (tokenizer: ${tokenizerName})`);
6288
+ return c.json({ input_tokens: finalTokenCount });
6289
+ } catch (error) {
6290
+ consola.error("Error counting tokens:", error);
6291
+ return c.json({ input_tokens: 1 });
6292
+ }
6293
+ }
6294
+
6262
6295
  //#endregion
6263
6296
  //#region src/routes/messages/route.ts
6264
6297
  const messageRoutes = new Hono();
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@dianshuv/copilot-api",
3
- "version": "0.2.0",
3
+ "version": "0.2.2",
4
4
  "description": "Turn GitHub Copilot into OpenAI/Anthropic API compatible server. Usable with Claude Code!",
5
5
  "author": "dianshuv",
6
6
  "type": "module",
@@ -19,7 +19,7 @@
19
19
  "prepack": "npm run build",
20
20
  "prepare": "npm run build && (command -v bun >/dev/null 2>&1 && simple-git-hooks || true)",
21
21
  "prepublishOnly": "npm run typecheck && npm run lint:all && npm run test",
22
- "release": "npm publish --access public",
22
+ "release": "npm publish --access public --//registry.npmjs.org/:_authToken=$NPM_TOKEN",
23
23
  "start": "NODE_ENV=production bun run ./src/main.ts",
24
24
  "test": "bun test tests/*.test.ts",
25
25
  "test:all": "bun test tests/*.test.ts && bun test tests/integration/",