@jeffreycao/copilot-api 1.5.3-beta.3 → 1.5.3-beta.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/dist/main.js +1 -1
- package/dist/{server-DLaz3q72.js → server-ViAUcjP9.js} +23 -4
- package/dist/server-ViAUcjP9.js.map +1 -0
- package/dist/{start-CrNDSGP_.js → start-O4rR2TxD.js} +2 -2
- package/dist/{start-CrNDSGP_.js.map → start-O4rR2TxD.js.map} +1 -1
- package/package.json +1 -1
- package/dist/server-DLaz3q72.js.map +0 -1
package/README.md
CHANGED
|
@@ -614,7 +614,7 @@ Here is an example `.claude/settings.json` file:
|
|
|
614
614
|
- Setting CLAUDE_CODE_ATTRIBUTION_HEADER to 0 can prevent Claude Code from adding billing and version information in system prompts, thereby avoiding prompt cache invalidation.
|
|
615
615
|
- Turning off CLAUDE_CODE_ENABLE_PROMPT_SUGGESTION can prevent quota from being consumed unnecessarily.
|
|
616
616
|
- Permissions deny WebSearch because the GitHub Copilot API does not support native websearch (some gpt models support websearch, but the current project has not adapted websearch); it is recommended to install the mcp_server_fetch MCP tool or other search tools as alternatives.
|
|
617
|
-
- Please do not enable `ENABLE_TOOL_SEARCH`, as the current Claude Code uses the client tool search mode. In this mode, loading defer tools requires an additional request each time, and cache hit rates are affected, so it does not necessarily save tokens. Only server tool search mode can save tokens.
|
|
617
|
+
- Please do not enable `ENABLE_TOOL_SEARCH`, as the current Claude Code uses the client tool search mode. In this mode, loading deferred tools requires an additional request each time, and cache hit rates are affected, so it does not necessarily save tokens. Only server tool search mode can save tokens. If using the gpt model, do not enable it either.
|
|
618
618
|
|
|
619
619
|
You can find more options here: [Claude Code settings](https://docs.anthropic.com/en/docs/claude-code/settings#environment-variables)
|
|
620
620
|
|
package/dist/main.js
CHANGED
|
@@ -23,7 +23,7 @@ if (typeof args["enterprise-url"] === "string") process.env.COPILOT_API_ENTERPRI
|
|
|
23
23
|
const { auth } = await import("./auth-DqeBalDi.js");
|
|
24
24
|
const { checkUsage } = await import("./check-usage-BCT4zrbb.js");
|
|
25
25
|
const { debug } = await import("./debug-DcC7ZPH0.js");
|
|
26
|
-
const { start } = await import("./start-
|
|
26
|
+
const { start } = await import("./start-O4rR2TxD.js");
|
|
27
27
|
const main = defineCommand({
|
|
28
28
|
meta: {
|
|
29
29
|
name: "copilot-api",
|
|
@@ -744,7 +744,6 @@ function handleAssistantMessage(message, modelId) {
|
|
|
744
744
|
function mapContent(content) {
|
|
745
745
|
if (typeof content === "string") return content;
|
|
746
746
|
if (!Array.isArray(content)) return null;
|
|
747
|
-
if (!content.some((block) => block.type === "image")) return content.filter((block) => block.type === "text").map((block) => block.text).join("\n\n");
|
|
748
747
|
const contentParts = [];
|
|
749
748
|
for (const block of content) switch (block.type) {
|
|
750
749
|
case "text":
|
|
@@ -759,6 +758,12 @@ function mapContent(content) {
|
|
|
759
758
|
image_url: { url: `data:${block.source.media_type};base64,${block.source.data}` }
|
|
760
759
|
});
|
|
761
760
|
break;
|
|
761
|
+
case "tool_reference":
|
|
762
|
+
contentParts.push({
|
|
763
|
+
type: "text",
|
|
764
|
+
text: `Tool ${block.tool_name} loaded`
|
|
765
|
+
});
|
|
766
|
+
break;
|
|
762
767
|
}
|
|
763
768
|
return contentParts;
|
|
764
769
|
}
|
|
@@ -1370,6 +1375,9 @@ const convertToolResultContent = (content) => {
|
|
|
1370
1375
|
case "image":
|
|
1371
1376
|
result.push(createImageContent(block));
|
|
1372
1377
|
break;
|
|
1378
|
+
case "tool_reference":
|
|
1379
|
+
result.push(createTextContent(`Tool ${block.tool_name} loaded`));
|
|
1380
|
+
break;
|
|
1373
1381
|
default: break;
|
|
1374
1382
|
}
|
|
1375
1383
|
return result;
|
|
@@ -1924,6 +1932,7 @@ const compactSystemPromptStart = "You are a helpful AI assistant tasked with sum
|
|
|
1924
1932
|
const compactTextOnlyGuard = "CRITICAL: Respond with TEXT ONLY. Do NOT call any tools.";
|
|
1925
1933
|
const compactSummaryPromptStart = "Your task is to create a detailed summary of the conversation so far";
|
|
1926
1934
|
const compactMessageSections = ["Pending Tasks:", "Current Work:"];
|
|
1935
|
+
const TOOL_REFERENCE_TURN_BOUNDARY = "Tool loaded.";
|
|
1927
1936
|
const getAnthropicEffortForModel = (model) => {
|
|
1928
1937
|
const reasoningEffort = getReasoningEffortForModel(model);
|
|
1929
1938
|
if (reasoningEffort === "xhigh") return "max";
|
|
@@ -1976,6 +1985,13 @@ const mergeToolResult = (toolResults, textBlocks) => {
|
|
|
1976
1985
|
const lastIndex = toolResults.length - 1;
|
|
1977
1986
|
return toolResults.map((tr, i) => i === lastIndex ? mergeContentWithTexts(tr, textBlocks) : tr);
|
|
1978
1987
|
};
|
|
1988
|
+
const stripToolReferenceTurnBoundary = (anthropicPayload) => {
|
|
1989
|
+
for (const msg of anthropicPayload.messages) {
|
|
1990
|
+
if (msg.role !== "user" || !Array.isArray(msg.content)) continue;
|
|
1991
|
+
if (!msg.content.some((block) => block.type === "tool_result" && Array.isArray(block.content) && block.content.some((contentBlock) => contentBlock.type === "tool_reference"))) continue;
|
|
1992
|
+
msg.content = msg.content.filter((block) => block.type !== "text" || block.text.trim() !== TOOL_REFERENCE_TURN_BOUNDARY);
|
|
1993
|
+
}
|
|
1994
|
+
};
|
|
1979
1995
|
const mergeToolResultForClaude = (anthropicPayload) => {
|
|
1980
1996
|
for (const msg of anthropicPayload.messages) {
|
|
1981
1997
|
if (msg.role !== "user" || !Array.isArray(msg.content)) continue;
|
|
@@ -2412,7 +2428,7 @@ const isAsyncIterable$1 = (value) => Boolean(value) && typeof value[Symbol.async
|
|
|
2412
2428
|
//#region src/routes/messages/subagent-marker.ts
|
|
2413
2429
|
const subagentMarkerPrefix = "__SUBAGENT_MARKER__";
|
|
2414
2430
|
const parseSubagentMarkerFromFirstUser = (payload) => {
|
|
2415
|
-
const firstUserMessage = payload.messages.find((msg) => msg.role === "user");
|
|
2431
|
+
const firstUserMessage = payload.messages.find((msg) => msg.role === "user" && Array.isArray(msg.content));
|
|
2416
2432
|
if (!firstUserMessage || !Array.isArray(firstUserMessage.content)) return null;
|
|
2417
2433
|
for (const block of firstUserMessage.content) {
|
|
2418
2434
|
if (block.type !== "text") continue;
|
|
@@ -2470,7 +2486,10 @@ async function handleCompletion(c) {
|
|
|
2470
2486
|
const noTools = !anthropicPayload.tools || anthropicPayload.tools.length === 0;
|
|
2471
2487
|
if (anthropicBeta && noTools && !isCompact) anthropicPayload.model = getSmallModel();
|
|
2472
2488
|
if (isCompact) logger$5.debug("Is compact request:", isCompact);
|
|
2473
|
-
else
|
|
2489
|
+
else {
|
|
2490
|
+
stripToolReferenceTurnBoundary(anthropicPayload);
|
|
2491
|
+
mergeToolResultForClaude(anthropicPayload);
|
|
2492
|
+
}
|
|
2474
2493
|
const requestId = generateRequestIdFromPayload(anthropicPayload, sessionId);
|
|
2475
2494
|
logger$5.debug("Generated request ID:", requestId);
|
|
2476
2495
|
if (state.manualApprove) await awaitApproval();
|
|
@@ -2979,4 +2998,4 @@ server.route("/:provider/v1/models", providerModelRoutes);
|
|
|
2979
2998
|
|
|
2980
2999
|
//#endregion
|
|
2981
3000
|
export { server };
|
|
2982
|
-
//# sourceMappingURL=server-
|
|
3001
|
+
//# sourceMappingURL=server-ViAUcjP9.js.map
|