@dianshuv/copilot-api 0.4.2 → 0.4.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +10 -1
- package/dist/main.mjs +4 -4
- package/package.json +1 -1
package/README.md
CHANGED
|
@@ -61,7 +61,7 @@ copilot-api start
|
|
|
61
61
|
| `--proxy-env` | Use proxy from environment | false |
|
|
62
62
|
| `--no-history` | Disable request history UI at `/history` | false |
|
|
63
63
|
| `--history-limit` | Max history entries in memory | 1000 |
|
|
64
|
-
| `--no-auto-truncate` | Disable auto-truncate when exceeding limits | false |
|
|
64
|
+
| `--no-auto-truncate` | Disable auto-truncate when exceeding token limits | false |
|
|
65
65
|
| `--compress-tool-results` | Compress old tool results before truncating | false |
|
|
66
66
|
| `--redirect-anthropic` | Force Anthropic through OpenAI translation | false |
|
|
67
67
|
| `--no-rewrite-anthropic-tools` | Don't rewrite server-side tools | false |
|
|
@@ -106,6 +106,15 @@ copilot-api start
|
|
|
106
106
|
| `/history` | GET | Request history Web UI with token analytics (enabled by default) |
|
|
107
107
|
| `/history/api/*` | GET/DELETE | History API endpoints |
|
|
108
108
|
|
|
109
|
+
## Auto-Truncate
|
|
110
|
+
|
|
111
|
+
When enabled (default), auto-truncate automatically compacts conversation history when it exceeds the model's token limit. This prevents request failures due to context overflow.
|
|
112
|
+
|
|
113
|
+
- **Token-based truncation**: Uses the model's `max_context_window_tokens` from the Copilot API to determine when truncation is needed. A 2% safety margin is applied.
|
|
114
|
+
- **No preset byte limit**: There is no hardcoded request body size limit. If the Copilot API returns a 413 (Request Entity Too Large), the proxy dynamically learns the byte limit and applies it to subsequent requests.
|
|
115
|
+
- **Smart compression**: With `--compress-tool-results`, old tool results are compressed before removing messages, preserving more conversation context.
|
|
116
|
+
- **Orphan filtering**: After truncation, orphaned tool results (without matching tool calls) are automatically removed.
|
|
117
|
+
|
|
109
118
|
## Using with Claude Code
|
|
110
119
|
|
|
111
120
|
Create `.claude/settings.json` in your project:
|
package/dist/main.mjs
CHANGED
|
@@ -106,7 +106,7 @@ const GITHUB_APP_SCOPES = ["read:user"].join(" ");
|
|
|
106
106
|
*/
|
|
107
107
|
const DEFAULT_AUTO_TRUNCATE_CONFIG = {
|
|
108
108
|
safetyMarginPercent: 2,
|
|
109
|
-
maxRequestBodyBytes:
|
|
109
|
+
maxRequestBodyBytes: Infinity,
|
|
110
110
|
preserveRecentPercent: .7
|
|
111
111
|
};
|
|
112
112
|
/** Dynamic byte limit that adjusts based on 413 errors */
|
|
@@ -1021,7 +1021,7 @@ const patchClaude = defineCommand({
|
|
|
1021
1021
|
|
|
1022
1022
|
//#endregion
|
|
1023
1023
|
//#region package.json
|
|
1024
|
-
var version = "0.4.2";
|
|
1024
|
+
var version = "0.4.3";
|
|
1025
1025
|
|
|
1026
1026
|
//#endregion
|
|
1027
1027
|
//#region src/lib/adaptive-rate-limiter.ts
|
|
@@ -3521,7 +3521,7 @@ async function buildFinalPayload(payload, model) {
|
|
|
3521
3521
|
}
|
|
3522
3522
|
try {
|
|
3523
3523
|
const check = await checkNeedsCompactionOpenAI(payload, model);
|
|
3524
|
-
consola.debug(`Auto-truncate check: ${check.currentTokens} tokens (limit ${check.tokenLimit}), ${Math.round(check.currentBytes / 1024)}KB (limit ${Math.round(check.byteLimit / 1024)}KB), needed: ${check.needed}${check.reason ? ` (${check.reason})` : ""}`);
|
|
3524
|
+
consola.debug(`Auto-truncate check: ${check.currentTokens} tokens (limit ${check.tokenLimit}), ${Math.round(check.currentBytes / 1024)}KB (limit ${check.byteLimit === Infinity ? "unlimited" : `${Math.round(check.byteLimit / 1024)}KB`}), needed: ${check.needed}${check.reason ? ` (${check.reason})` : ""}`);
|
|
3525
3525
|
if (!check.needed) return {
|
|
3526
3526
|
finalPayload: payload,
|
|
3527
3527
|
truncateResult: null
|
|
@@ -6738,7 +6738,7 @@ async function handleDirectAnthropicCompletion(c, anthropicPayload, ctx, initiat
|
|
|
6738
6738
|
let truncateResult;
|
|
6739
6739
|
if (state.autoTruncate && selectedModel) {
|
|
6740
6740
|
const check = await checkNeedsCompactionAnthropic(anthropicPayload, selectedModel);
|
|
6741
|
-
consola.debug(`[Anthropic] Auto-truncate check: ${check.currentTokens} tokens (limit ${check.tokenLimit}), ${Math.round(check.currentBytes / 1024)}KB (limit ${Math.round(check.byteLimit / 1024)}KB), needed: ${check.needed}${check.reason ? ` (${check.reason})` : ""}`);
|
|
6741
|
+
consola.debug(`[Anthropic] Auto-truncate check: ${check.currentTokens} tokens (limit ${check.tokenLimit}), ${Math.round(check.currentBytes / 1024)}KB (limit ${check.byteLimit === Infinity ? "unlimited" : `${Math.round(check.byteLimit / 1024)}KB`}), needed: ${check.needed}${check.reason ? ` (${check.reason})` : ""}`);
|
|
6742
6742
|
if (check.needed) try {
|
|
6743
6743
|
truncateResult = await autoTruncateAnthropic(anthropicPayload, selectedModel);
|
|
6744
6744
|
if (truncateResult.wasCompacted) effectivePayload = truncateResult.payload;
|