@mariozechner/pi-ai 0.64.0 → 0.65.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/models.generated.d.ts +373 -101
- package/dist/models.generated.d.ts.map +1 -1
- package/dist/models.generated.js +503 -246
- package/dist/models.generated.js.map +1 -1
- package/dist/providers/amazon-bedrock.d.ts.map +1 -1
- package/dist/providers/amazon-bedrock.js +35 -7
- package/dist/providers/amazon-bedrock.js.map +1 -1
- package/dist/providers/openai-completions.d.ts.map +1 -1
- package/dist/providers/openai-completions.js +18 -6
- package/dist/providers/openai-completions.js.map +1 -1
- package/dist/providers/openai-responses-shared.d.ts.map +1 -1
- package/dist/providers/openai-responses-shared.js +12 -0
- package/dist/providers/openai-responses-shared.js.map +1 -1
- package/dist/types.d.ts +2 -0
- package/dist/types.d.ts.map +1 -1
- package/dist/types.js.map +1 -1
- package/dist/utils/overflow.d.ts +1 -1
- package/dist/utils/overflow.d.ts.map +1 -1
- package/dist/utils/overflow.js +23 -10
- package/dist/utils/overflow.js.map +1 -1
- package/package.json +1 -1
package/dist/utils/overflow.js
CHANGED
@@ -7,6 +7,7 @@
  * Provider-specific patterns (with example error messages):
  *
  * - Anthropic: "prompt is too long: 213462 tokens > 200000 maximum"
+ * - Anthropic: "413 {\"error\":{\"type\":\"request_too_large\",\"message\":\"Request exceeds the maximum size\"}}"
  * - OpenAI: "Your input exceeds the context window of this model"
  * - Google: "The input token count (1196265) exceeds the maximum number of tokens allowed (1048575)"
  * - xAI: "This model's maximum prompt length is 131072 but the request contains 537812 tokens"
@@ -17,13 +18,14 @@
  * - GitHub Copilot: "prompt token count of X exceeds the limit of Y"
  * - MiniMax: "invalid params, context window exceeds limit"
  * - Kimi For Coding: "Your request exceeded model token limit: X (requested: Y)"
- * - Cerebras:
+ * - Cerebras: "400/413 status code (no body)"
  * - Mistral: "Prompt contains X tokens ... too large for model with Y maximum context length"
  * - z.ai: Does NOT error, accepts overflow silently - handled via usage.input > contextWindow
  * - Ollama: Some deployments truncate silently, others return errors like "prompt too long; exceeded max context length by X tokens"
  */
 const OVERFLOW_PATTERNS = [
-    /prompt is too long/i, // Anthropic
+    /prompt is too long/i, // Anthropic token overflow
+    /request_too_large/i, // Anthropic request byte-size overflow (HTTP 413)
     /input is too long for requested model/i, // Amazon Bedrock
     /exceeds the context window/i, // OpenAI (Completions & Responses API)
     /input token count.*exceeds the maximum/i, // Google (Gemini)
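Taken together with the first hunk, Anthropic overflow detection now distinguishes token overflow from HTTP 413 byte-size overflow. As a quick illustration (this snippet is not part of the package; it just exercises the two shipped regexes against the example messages from the doc comment):

```ts
// The two Anthropic entries from OVERFLOW_PATTERNS, tried against the
// documented example error messages.
const tokenOverflow = /prompt is too long/i;
const byteOverflow = /request_too_large/i;

console.log(tokenOverflow.test('prompt is too long: 213462 tokens > 200000 maximum')); // true
console.log(byteOverflow.test('413 {"error":{"type":"request_too_large","message":"Request exceeds the maximum size"}}')); // true
console.log(byteOverflow.test('prompt is too long: 213462 tokens > 200000 maximum')); // false
```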
@@ -41,6 +43,21 @@ const OVERFLOW_PATTERNS = [
     /context[_ ]length[_ ]exceeded/i, // Generic fallback
     /too many tokens/i, // Generic fallback
     /token limit exceeded/i, // Generic fallback
+    /^4(?:00|13)\s*(?:status code)?\s*\(no body\)/i, // Cerebras: 400/413 with no body
+];
+/**
+ * Patterns that indicate non-overflow errors (e.g. rate limiting, server errors).
+ * Error messages matching any of these are excluded from overflow detection
+ * even if they also match an OVERFLOW_PATTERN.
+ *
+ * Example: Bedrock formats throttling errors as "ThrottlingException: Too many tokens,
+ * please wait before trying again." which would match the /too many tokens/i overflow
+ * pattern without this exclusion.
+ */
+const NON_OVERFLOW_PATTERNS = [
+    /^(Throttling error|Service unavailable):/i, // AWS Bedrock non-overflow errors (human-readable prefixes from formatBedrockError)
+    /rate limit/i, // Generic rate limiting
+    /too many requests/i, // Generic HTTP 429 style
 ];
 /**
  * Check if an assistant message represents a context overflow error.
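The new NON_OVERFLOW_PATTERNS list acts as a veto: broad fallbacks such as /too many tokens/i would otherwise misclassify Bedrock throttling responses as context overflow. A minimal sketch of the precedence, assuming throttling messages arrive with the "Throttling error:" prefix that the pattern comment attributes to formatBedrockError:

```ts
// Standalone sketch of the veto-then-match logic, using a subset of the
// shipped patterns.
const OVERFLOW = [/too many tokens/i, /^4(?:00|13)\s*(?:status code)?\s*\(no body\)/i];
const NON_OVERFLOW = [/^(Throttling error|Service unavailable):/i, /rate limit/i, /too many requests/i];

const looksLikeOverflow = (msg: string): boolean =>
    !NON_OVERFLOW.some((p) => p.test(msg)) && OVERFLOW.some((p) => p.test(msg));

console.log(looksLikeOverflow("Throttling error: Too many tokens, please wait before trying again.")); // false (vetoed)
console.log(looksLikeOverflow("too many tokens")); // true (generic fallback)
console.log(looksLikeOverflow("413 (no body)")); // true (Cerebras-style empty body)
```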
@@ -54,7 +71,7 @@
  * ## Reliability by Provider
  *
  * **Reliable detection (returns error with detectable message):**
- * - Anthropic: "prompt is too long: X tokens > Y maximum"
+ * - Anthropic: "prompt is too long: X tokens > Y maximum" or "request_too_large"
  * - OpenAI (Completions & Responses): "exceeds the context window"
  * - Google Gemini: "input token count exceeds the maximum"
  * - xAI (Grok): "maximum prompt length is X but request contains Y"
@@ -91,13 +108,9 @@ const OVERFLOW_PATTERNS = [
 export function isContextOverflow(message, contextWindow) {
     // Case 1: Check error message patterns
     if (message.stopReason === "error" && message.errorMessage) {
-        //
-        if (OVERFLOW_PATTERNS.some((p) => p.test(message.errorMessage))) {
-            return true;
-        }
-        // Cerebras returns 400/413 with no body for context overflow
-        // Note: 429 is rate limiting (requests/tokens per time), NOT context overflow
-        if (/^4(00|13)\s*(status code)?\s*\(no body\)/i.test(message.errorMessage)) {
+        // Skip messages matching known non-overflow patterns (e.g. throttling / rate-limit)
+        const isNonOverflow = NON_OVERFLOW_PATTERNS.some((p) => p.test(message.errorMessage));
+        if (!isNonOverflow && OVERFLOW_PATTERNS.some((p) => p.test(message.errorMessage))) {
             return true;
         }
     }
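The rewritten body folds the old special-cased Cerebras status-line check into OVERFLOW_PATTERNS and applies the veto list before any overflow match. A usage sketch follows; the object literals are hypothetical minimal stand-ins (the real AssistantMessage type carries more fields than the ones isContextOverflow reads), and the root import path is an assumption:

```ts
import { isContextOverflow } from "@mariozechner/pi-ai"; // export path assumed

// Error-based overflow: stopReason "error" plus a recognizable message.
const errored = {
    stopReason: "error",
    errorMessage: '413 {"error":{"type":"request_too_large","message":"Request exceeds the maximum size"}}',
} as any;
console.log(isContextOverflow(errored)); // true, via /request_too_large/i

// Silent overflow (z.ai style): the request succeeds but reported usage
// exceeds the context window passed by the caller.
const silent = {
    stopReason: "stop",
    usage: { input: 250_000, cacheRead: 0 },
} as any;
console.log(isContextOverflow(silent, 200_000)); // true, input + cacheRead > contextWindow
```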
package/dist/utils/overflow.js.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"overflow.js","sourceRoot":"","sources":["../../src/utils/overflow.ts"],"names":[],"mappings":"AAEA
+{"version":3,"file":"overflow.js","sourceRoot":"","sources":["../../src/utils/overflow.ts"],"names":[],"mappings":"AAEA;;;;;;;;;;;;;;;;;;;;;;;;GAwBG;AACH,MAAM,iBAAiB,GAAG;IACzB,qBAAqB,EAAE,2BAA2B;IAClD,oBAAoB,EAAE,kDAAkD;IACxE,wCAAwC,EAAE,iBAAiB;IAC3D,6BAA6B,EAAE,uCAAuC;IACtE,yCAAyC,EAAE,kBAAkB;IAC7D,+BAA+B,EAAE,aAAa;IAC9C,oCAAoC,EAAE,OAAO;IAC7C,uCAAuC,EAAE,4BAA4B;IACrE,2BAA2B,EAAE,iBAAiB;IAC9C,qCAAqC,EAAE,mBAAmB;IAC1D,kCAAkC,EAAE,YAAY;IAChD,+BAA+B,EAAE,UAAU;IAC3C,6BAA6B,EAAE,kBAAkB;IACjD,sDAAsD,EAAE,UAAU;IAClE,gCAAgC,EAAE,yDAAyD;IAC3F,oDAAoD,EAAE,iCAAiC;IACvF,gCAAgC,EAAE,mBAAmB;IACrD,kBAAkB,EAAE,mBAAmB;IACvC,uBAAuB,EAAE,mBAAmB;IAC5C,+CAA+C,EAAE,iCAAiC;CAClF,CAAC;AAEF;;;;;;;;GAQG;AACH,MAAM,qBAAqB,GAAG;IAC7B,2CAA2C,EAAE,oFAAoF;IACjI,aAAa,EAAE,wBAAwB;IACvC,oBAAoB,EAAE,yBAAyB;CAC/C,CAAC;AAEF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA6CG;AACH,MAAM,UAAU,iBAAiB,CAAC,OAAyB,EAAE,aAAsB,EAAW;IAC7F,uCAAuC;IACvC,IAAI,OAAO,CAAC,UAAU,KAAK,OAAO,IAAI,OAAO,CAAC,YAAY,EAAE,CAAC;QAC5D,oFAAoF;QACpF,MAAM,aAAa,GAAG,qBAAqB,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,OAAO,CAAC,YAAa,CAAC,CAAC,CAAC;QACvF,IAAI,CAAC,aAAa,IAAI,iBAAiB,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,OAAO,CAAC,YAAa,CAAC,CAAC,EAAE,CAAC;YACpF,OAAO,IAAI,CAAC;QACb,CAAC;IACF,CAAC;IAED,8EAA8E;IAC9E,IAAI,aAAa,IAAI,OAAO,CAAC,UAAU,KAAK,MAAM,EAAE,CAAC;QACpD,MAAM,WAAW,GAAG,OAAO,CAAC,KAAK,CAAC,KAAK,GAAG,OAAO,CAAC,KAAK,CAAC,SAAS,CAAC;QAClE,IAAI,WAAW,GAAG,aAAa,EAAE,CAAC;YACjC,OAAO,IAAI,CAAC;QACb,CAAC;IACF,CAAC;IAED,OAAO,KAAK,CAAC;AAAA,CACb;AAED;;GAEG;AACH,MAAM,UAAU,mBAAmB,GAAa;IAC/C,OAAO,CAAC,GAAG,iBAAiB,CAAC,CAAC;AAAA,CAC9B","sourcesContent":["import type { AssistantMessage } from \"../types.js\";\n\n/**\n * Regex patterns to detect context overflow errors from different providers.\n *\n * These patterns match error messages returned when the input exceeds\n * the model's context window.\n *\n * Provider-specific patterns (with example error messages):\n *\n * - Anthropic: \"prompt is too long: 213462 tokens > 200000 maximum\"\n * - Anthropic: \"413 {\\\"error\\\":{\\\"type\\\":\\\"request_too_large\\\",\\\"message\\\":\\\"Request exceeds the maximum size\\\"}}\"\n * - OpenAI: \"Your input exceeds the context window of this model\"\n * - Google: \"The input token count (1196265) exceeds the maximum number of tokens allowed (1048575)\"\n * - xAI: \"This model's maximum prompt length is 131072 but the request contains 537812 tokens\"\n * - Groq: \"Please reduce the length of the messages or completion\"\n * - OpenRouter: \"This endpoint's maximum context length is X tokens. However, you requested about Y tokens\"\n * - llama.cpp: \"the request exceeds the available context size, try increasing it\"\n * - LM Studio: \"tokens to keep from the initial prompt is greater than the context length\"\n * - GitHub Copilot: \"prompt token count of X exceeds the limit of Y\"\n * - MiniMax: \"invalid params, context window exceeds limit\"\n * - Kimi For Coding: \"Your request exceeded model token limit: X (requested: Y)\"\n * - Cerebras: \"400/413 status code (no body)\"\n * - Mistral: \"Prompt contains X tokens ... too large for model with Y maximum context length\"\n * - z.ai: Does NOT error, accepts overflow silently - handled via usage.input > contextWindow\n * - Ollama: Some deployments truncate silently, others return errors like \"prompt too long; exceeded max context length by X tokens\"\n */\nconst OVERFLOW_PATTERNS = [\n\t/prompt is too long/i, // Anthropic token overflow\n\t/request_too_large/i, // Anthropic request byte-size overflow (HTTP 413)\n\t/input is too long for requested model/i, // Amazon Bedrock\n\t/exceeds the context window/i, // OpenAI (Completions & Responses API)\n\t/input token count.*exceeds the maximum/i, // Google (Gemini)\n\t/maximum prompt length is \\d+/i, // xAI (Grok)\n\t/reduce the length of the messages/i, // Groq\n\t/maximum context length is \\d+ tokens/i, // OpenRouter (all backends)\n\t/exceeds the limit of \\d+/i, // GitHub Copilot\n\t/exceeds the available context size/i, // llama.cpp server\n\t/greater than the context length/i, // LM Studio\n\t/context window exceeds limit/i, // MiniMax\n\t/exceeded model token limit/i, // Kimi For Coding\n\t/too large for model with \\d+ maximum context length/i, // Mistral\n\t/model_context_window_exceeded/i, // z.ai non-standard finish_reason surfaced as error text\n\t/prompt too long; exceeded (?:max )?context length/i, // Ollama explicit overflow error\n\t/context[_ ]length[_ ]exceeded/i, // Generic fallback\n\t/too many tokens/i, // Generic fallback\n\t/token limit exceeded/i, // Generic fallback\n\t/^4(?:00|13)\\s*(?:status code)?\\s*\\(no body\\)/i, // Cerebras: 400/413 with no body\n];\n\n/**\n * Patterns that indicate non-overflow errors (e.g. rate limiting, server errors).\n * Error messages matching any of these are excluded from overflow detection\n * even if they also match an OVERFLOW_PATTERN.\n *\n * Example: Bedrock formats throttling errors as \"ThrottlingException: Too many tokens,\n * please wait before trying again.\" which would match the /too many tokens/i overflow\n * pattern without this exclusion.\n */\nconst NON_OVERFLOW_PATTERNS = [\n\t/^(Throttling error|Service unavailable):/i, // AWS Bedrock non-overflow errors (human-readable prefixes from formatBedrockError)\n\t/rate limit/i, // Generic rate limiting\n\t/too many requests/i, // Generic HTTP 429 style\n];\n\n/**\n * Check if an assistant message represents a context overflow error.\n *\n * This handles two cases:\n * 1. Error-based overflow: Most providers return stopReason \"error\" with a\n * specific error message pattern.\n * 2. Silent overflow: Some providers accept overflow requests and return\n * successfully. For these, we check if usage.input exceeds the context window.\n *\n * ## Reliability by Provider\n *\n * **Reliable detection (returns error with detectable message):**\n * - Anthropic: \"prompt is too long: X tokens > Y maximum\" or \"request_too_large\"\n * - OpenAI (Completions & Responses): \"exceeds the context window\"\n * - Google Gemini: \"input token count exceeds the maximum\"\n * - xAI (Grok): \"maximum prompt length is X but request contains Y\"\n * - Groq: \"reduce the length of the messages\"\n * - Cerebras: 400/413 status code (no body)\n * - Mistral: \"Prompt contains X tokens ... too large for model with Y maximum context length\"\n * - OpenRouter (all backends): \"maximum context length is X tokens\"\n * - llama.cpp: \"exceeds the available context size\"\n * - LM Studio: \"greater than the context length\"\n * - Kimi For Coding: \"exceeded model token limit: X (requested: Y)\"\n *\n * **Unreliable detection:**\n * - z.ai: Sometimes accepts overflow silently (detectable via usage.input > contextWindow),\n * sometimes returns rate limit errors. Pass contextWindow param to detect silent overflow.\n * - Ollama: May truncate input silently for some setups, but may also return explicit\n * overflow errors that match the patterns above. Silent truncation still cannot be\n * detected here because we do not know the expected token count.\n *\n * ## Custom Providers\n *\n * If you've added custom models via settings.json, this function may not detect\n * overflow errors from those providers. To add support:\n *\n * 1. Send a request that exceeds the model's context window\n * 2. Check the errorMessage in the response\n * 3. Create a regex pattern that matches the error\n * 4. The pattern should be added to OVERFLOW_PATTERNS in this file, or\n * check the errorMessage yourself before calling this function\n *\n * @param message - The assistant message to check\n * @param contextWindow - Optional context window size for detecting silent overflow (z.ai)\n * @returns true if the message indicates a context overflow\n */\nexport function isContextOverflow(message: AssistantMessage, contextWindow?: number): boolean {\n\t// Case 1: Check error message patterns\n\tif (message.stopReason === \"error\" && message.errorMessage) {\n\t\t// Skip messages matching known non-overflow patterns (e.g. throttling / rate-limit)\n\t\tconst isNonOverflow = NON_OVERFLOW_PATTERNS.some((p) => p.test(message.errorMessage!));\n\t\tif (!isNonOverflow && OVERFLOW_PATTERNS.some((p) => p.test(message.errorMessage!))) {\n\t\t\treturn true;\n\t\t}\n\t}\n\n\t// Case 2: Silent overflow (z.ai style) - successful but usage exceeds context\n\tif (contextWindow && message.stopReason === \"stop\") {\n\t\tconst inputTokens = message.usage.input + message.usage.cacheRead;\n\t\tif (inputTokens > contextWindow) {\n\t\t\treturn true;\n\t\t}\n\t}\n\n\treturn false;\n}\n\n/**\n * Get the overflow patterns for testing purposes.\n */\nexport function getOverflowPatterns(): RegExp[] {\n\treturn [...OVERFLOW_PATTERNS];\n}\n"]}
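The sourcesContent embedded in the new map also shows a small test hook, getOverflowPatterns(), which returns a copy of OVERFLOW_PATTERNS. One plausible way for a custom-provider integrator to use it, per the "Custom Providers" guidance in the doc comment (the deep dist import path is an assumption and may need adjusting to the package's export map):

```ts
import { getOverflowPatterns } from "@mariozechner/pi-ai/dist/utils/overflow.js"; // path assumed

// Check whether a custom provider's overflow error would be auto-detected.
const sampleError = "my-provider: maximum context of 32768 tokens exceeded"; // hypothetical message
if (!getOverflowPatterns().some((p) => p.test(sampleError))) {
    // Per the doc comment: add a pattern upstream, or test errorMessage
    // yourself before calling isContextOverflow().
    console.warn("overflow errors from this provider will not be auto-detected");
}
```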