@lenylvt/pi-ai 0.64.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +203 -0
- package/dist/api-registry.d.ts +20 -0
- package/dist/api-registry.d.ts.map +1 -0
- package/dist/api-registry.js +44 -0
- package/dist/api-registry.js.map +1 -0
- package/dist/cli.d.ts +3 -0
- package/dist/cli.d.ts.map +1 -0
- package/dist/cli.js +119 -0
- package/dist/cli.js.map +1 -0
- package/dist/env-api-keys.d.ts +7 -0
- package/dist/env-api-keys.d.ts.map +1 -0
- package/dist/env-api-keys.js +13 -0
- package/dist/env-api-keys.js.map +1 -0
- package/dist/index.d.ts +20 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +14 -0
- package/dist/index.js.map +1 -0
- package/dist/models.d.ts +24 -0
- package/dist/models.d.ts.map +1 -0
- package/dist/models.generated.d.ts +2332 -0
- package/dist/models.generated.d.ts.map +1 -0
- package/dist/models.generated.js +2186 -0
- package/dist/models.generated.js.map +1 -0
- package/dist/models.js +60 -0
- package/dist/models.js.map +1 -0
- package/dist/oauth.d.ts +2 -0
- package/dist/oauth.d.ts.map +1 -0
- package/dist/oauth.js +2 -0
- package/dist/oauth.js.map +1 -0
- package/dist/providers/anthropic.d.ts +40 -0
- package/dist/providers/anthropic.d.ts.map +1 -0
- package/dist/providers/anthropic.js +749 -0
- package/dist/providers/anthropic.js.map +1 -0
- package/dist/providers/faux.d.ts +56 -0
- package/dist/providers/faux.d.ts.map +1 -0
- package/dist/providers/faux.js +367 -0
- package/dist/providers/faux.js.map +1 -0
- package/dist/providers/github-copilot-headers.d.ts +8 -0
- package/dist/providers/github-copilot-headers.d.ts.map +1 -0
- package/dist/providers/github-copilot-headers.js +29 -0
- package/dist/providers/github-copilot-headers.js.map +1 -0
- package/dist/providers/openai-codex-responses.d.ts +9 -0
- package/dist/providers/openai-codex-responses.d.ts.map +1 -0
- package/dist/providers/openai-codex-responses.js +741 -0
- package/dist/providers/openai-codex-responses.js.map +1 -0
- package/dist/providers/openai-completions.d.ts +15 -0
- package/dist/providers/openai-completions.d.ts.map +1 -0
- package/dist/providers/openai-completions.js +687 -0
- package/dist/providers/openai-completions.js.map +1 -0
- package/dist/providers/openai-responses-shared.d.ts +17 -0
- package/dist/providers/openai-responses-shared.d.ts.map +1 -0
- package/dist/providers/openai-responses-shared.js +458 -0
- package/dist/providers/openai-responses-shared.js.map +1 -0
- package/dist/providers/openai-responses.d.ts +13 -0
- package/dist/providers/openai-responses.d.ts.map +1 -0
- package/dist/providers/openai-responses.js +190 -0
- package/dist/providers/openai-responses.js.map +1 -0
- package/dist/providers/register-builtins.d.ts +16 -0
- package/dist/providers/register-builtins.d.ts.map +1 -0
- package/dist/providers/register-builtins.js +140 -0
- package/dist/providers/register-builtins.js.map +1 -0
- package/dist/providers/simple-options.d.ts +8 -0
- package/dist/providers/simple-options.d.ts.map +1 -0
- package/dist/providers/simple-options.js +35 -0
- package/dist/providers/simple-options.js.map +1 -0
- package/dist/providers/transform-messages.d.ts +8 -0
- package/dist/providers/transform-messages.d.ts.map +1 -0
- package/dist/providers/transform-messages.js +155 -0
- package/dist/providers/transform-messages.js.map +1 -0
- package/dist/stream.d.ts +8 -0
- package/dist/stream.d.ts.map +1 -0
- package/dist/stream.js +27 -0
- package/dist/stream.js.map +1 -0
- package/dist/types.d.ts +283 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +2 -0
- package/dist/types.js.map +1 -0
- package/dist/utils/event-stream.d.ts +21 -0
- package/dist/utils/event-stream.d.ts.map +1 -0
- package/dist/utils/event-stream.js +81 -0
- package/dist/utils/event-stream.js.map +1 -0
- package/dist/utils/hash.d.ts +3 -0
- package/dist/utils/hash.d.ts.map +1 -0
- package/dist/utils/hash.js +14 -0
- package/dist/utils/hash.js.map +1 -0
- package/dist/utils/json-parse.d.ts +9 -0
- package/dist/utils/json-parse.d.ts.map +1 -0
- package/dist/utils/json-parse.js +29 -0
- package/dist/utils/json-parse.js.map +1 -0
- package/dist/utils/oauth/anthropic.d.ts +25 -0
- package/dist/utils/oauth/anthropic.d.ts.map +1 -0
- package/dist/utils/oauth/anthropic.js +335 -0
- package/dist/utils/oauth/anthropic.js.map +1 -0
- package/dist/utils/oauth/github-copilot.d.ts +30 -0
- package/dist/utils/oauth/github-copilot.d.ts.map +1 -0
- package/dist/utils/oauth/github-copilot.js +292 -0
- package/dist/utils/oauth/github-copilot.js.map +1 -0
- package/dist/utils/oauth/index.d.ts +36 -0
- package/dist/utils/oauth/index.d.ts.map +1 -0
- package/dist/utils/oauth/index.js +92 -0
- package/dist/utils/oauth/index.js.map +1 -0
- package/dist/utils/oauth/oauth-page.d.ts +3 -0
- package/dist/utils/oauth/oauth-page.d.ts.map +1 -0
- package/dist/utils/oauth/oauth-page.js +105 -0
- package/dist/utils/oauth/oauth-page.js.map +1 -0
- package/dist/utils/oauth/openai-codex.d.ts +34 -0
- package/dist/utils/oauth/openai-codex.d.ts.map +1 -0
- package/dist/utils/oauth/openai-codex.js +373 -0
- package/dist/utils/oauth/openai-codex.js.map +1 -0
- package/dist/utils/oauth/pkce.d.ts +13 -0
- package/dist/utils/oauth/pkce.d.ts.map +1 -0
- package/dist/utils/oauth/pkce.js +31 -0
- package/dist/utils/oauth/pkce.js.map +1 -0
- package/dist/utils/oauth/types.d.ts +47 -0
- package/dist/utils/oauth/types.d.ts.map +1 -0
- package/dist/utils/oauth/types.js +2 -0
- package/dist/utils/oauth/types.js.map +1 -0
- package/dist/utils/overflow.d.ts +53 -0
- package/dist/utils/overflow.d.ts.map +1 -0
- package/dist/utils/overflow.js +119 -0
- package/dist/utils/overflow.js.map +1 -0
- package/dist/utils/sanitize-unicode.d.ts +22 -0
- package/dist/utils/sanitize-unicode.d.ts.map +1 -0
- package/dist/utils/sanitize-unicode.js +26 -0
- package/dist/utils/sanitize-unicode.js.map +1 -0
- package/dist/utils/typebox-helpers.d.ts +17 -0
- package/dist/utils/typebox-helpers.d.ts.map +1 -0
- package/dist/utils/typebox-helpers.js +21 -0
- package/dist/utils/typebox-helpers.js.map +1 -0
- package/dist/utils/validation.d.ts +18 -0
- package/dist/utils/validation.d.ts.map +1 -0
- package/dist/utils/validation.js +80 -0
- package/dist/utils/validation.js.map +1 -0
- package/package.json +89 -0
- package/src/api-registry.ts +98 -0
- package/src/cli.ts +136 -0
- package/src/env-api-keys.ts +22 -0
- package/src/index.ts +29 -0
- package/src/models.generated.ts +2188 -0
- package/src/models.ts +82 -0
- package/src/oauth.ts +1 -0
- package/src/providers/anthropic.ts +905 -0
- package/src/providers/faux.ts +498 -0
- package/src/providers/github-copilot-headers.ts +37 -0
- package/src/providers/openai-codex-responses.ts +929 -0
- package/src/providers/openai-completions.ts +811 -0
- package/src/providers/openai-responses-shared.ts +513 -0
- package/src/providers/openai-responses.ts +251 -0
- package/src/providers/register-builtins.ts +232 -0
- package/src/providers/simple-options.ts +46 -0
- package/src/providers/transform-messages.ts +172 -0
- package/src/stream.ts +59 -0
- package/src/types.ts +294 -0
- package/src/utils/event-stream.ts +87 -0
- package/src/utils/hash.ts +13 -0
- package/src/utils/json-parse.ts +28 -0
- package/src/utils/oauth/anthropic.ts +402 -0
- package/src/utils/oauth/github-copilot.ts +396 -0
- package/src/utils/oauth/index.ts +123 -0
- package/src/utils/oauth/oauth-page.ts +109 -0
- package/src/utils/oauth/openai-codex.ts +450 -0
- package/src/utils/oauth/pkce.ts +34 -0
- package/src/utils/oauth/types.ts +59 -0
- package/src/utils/overflow.ts +125 -0
- package/src/utils/sanitize-unicode.ts +25 -0
- package/src/utils/typebox-helpers.ts +24 -0
- package/src/utils/validation.ts +93 -0
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* PKCE utilities using Web Crypto API.
|
|
3
|
+
* Works in both Node.js 20+ and browsers.
|
|
4
|
+
*/
|
|
5
|
+
/**
|
|
6
|
+
* Encode bytes as base64url string.
|
|
7
|
+
*/
|
|
8
|
+
function base64urlEncode(bytes) {
|
|
9
|
+
let binary = "";
|
|
10
|
+
for (const byte of bytes) {
|
|
11
|
+
binary += String.fromCharCode(byte);
|
|
12
|
+
}
|
|
13
|
+
return btoa(binary).replace(/\+/g, "-").replace(/\//g, "_").replace(/=/g, "");
|
|
14
|
+
}
|
|
15
|
+
/**
|
|
16
|
+
* Generate PKCE code verifier and challenge.
|
|
17
|
+
* Uses Web Crypto API for cross-platform compatibility.
|
|
18
|
+
*/
|
|
19
|
+
export async function generatePKCE() {
|
|
20
|
+
// Generate random verifier
|
|
21
|
+
const verifierBytes = new Uint8Array(32);
|
|
22
|
+
crypto.getRandomValues(verifierBytes);
|
|
23
|
+
const verifier = base64urlEncode(verifierBytes);
|
|
24
|
+
// Compute SHA-256 challenge
|
|
25
|
+
const encoder = new TextEncoder();
|
|
26
|
+
const data = encoder.encode(verifier);
|
|
27
|
+
const hashBuffer = await crypto.subtle.digest("SHA-256", data);
|
|
28
|
+
const challenge = base64urlEncode(new Uint8Array(hashBuffer));
|
|
29
|
+
return { verifier, challenge };
|
|
30
|
+
}
|
|
31
|
+
//# sourceMappingURL=pkce.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"pkce.js","sourceRoot":"","sources":["../../../src/utils/oauth/pkce.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH;;GAEG;AACH,SAAS,eAAe,CAAC,KAAiB,EAAU;IACnD,IAAI,MAAM,GAAG,EAAE,CAAC;IAChB,KAAK,MAAM,IAAI,IAAI,KAAK,EAAE,CAAC;QAC1B,MAAM,IAAI,MAAM,CAAC,YAAY,CAAC,IAAI,CAAC,CAAC;IACrC,CAAC;IACD,OAAO,IAAI,CAAC,MAAM,CAAC,CAAC,OAAO,CAAC,KAAK,EAAE,GAAG,CAAC,CAAC,OAAO,CAAC,KAAK,EAAE,GAAG,CAAC,CAAC,OAAO,CAAC,IAAI,EAAE,EAAE,CAAC,CAAC;AAAA,CAC9E;AAED;;;GAGG;AACH,MAAM,CAAC,KAAK,UAAU,YAAY,GAAqD;IACtF,2BAA2B;IAC3B,MAAM,aAAa,GAAG,IAAI,UAAU,CAAC,EAAE,CAAC,CAAC;IACzC,MAAM,CAAC,eAAe,CAAC,aAAa,CAAC,CAAC;IACtC,MAAM,QAAQ,GAAG,eAAe,CAAC,aAAa,CAAC,CAAC;IAEhD,4BAA4B;IAC5B,MAAM,OAAO,GAAG,IAAI,WAAW,EAAE,CAAC;IAClC,MAAM,IAAI,GAAG,OAAO,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC;IACtC,MAAM,UAAU,GAAG,MAAM,MAAM,CAAC,MAAM,CAAC,MAAM,CAAC,SAAS,EAAE,IAAI,CAAC,CAAC;IAC/D,MAAM,SAAS,GAAG,eAAe,CAAC,IAAI,UAAU,CAAC,UAAU,CAAC,CAAC,CAAC;IAE9D,OAAO,EAAE,QAAQ,EAAE,SAAS,EAAE,CAAC;AAAA,CAC/B","sourcesContent":["/**\n * PKCE utilities using Web Crypto API.\n * Works in both Node.js 20+ and browsers.\n */\n\n/**\n * Encode bytes as base64url string.\n */\nfunction base64urlEncode(bytes: Uint8Array): string {\n\tlet binary = \"\";\n\tfor (const byte of bytes) {\n\t\tbinary += String.fromCharCode(byte);\n\t}\n\treturn btoa(binary).replace(/\\+/g, \"-\").replace(/\\//g, \"_\").replace(/=/g, \"\");\n}\n\n/**\n * Generate PKCE code verifier and challenge.\n * Uses Web Crypto API for cross-platform compatibility.\n */\nexport async function generatePKCE(): Promise<{ verifier: string; challenge: string }> {\n\t// Generate random verifier\n\tconst verifierBytes = new Uint8Array(32);\n\tcrypto.getRandomValues(verifierBytes);\n\tconst verifier = base64urlEncode(verifierBytes);\n\n\t// Compute SHA-256 challenge\n\tconst encoder = new TextEncoder();\n\tconst data = encoder.encode(verifier);\n\tconst hashBuffer = await crypto.subtle.digest(\"SHA-256\", data);\n\tconst challenge = base64urlEncode(new 
Uint8Array(hashBuffer));\n\n\treturn { verifier, challenge };\n}\n"]}
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
import type { Api, Model } from "../../types.js";
/**
 * OAuth token set persisted for a provider between sessions.
 * The index signature lets individual providers store extra fields alongside
 * the standard refresh/access/expires triple.
 */
export type OAuthCredentials = {
	// Refresh token used to obtain a new access token when the current one expires.
	refresh: string;
	// Current access token sent with API requests.
	access: string;
	// Access-token expiry time. NOTE(review): presumably a Unix epoch timestamp — ms vs s is not visible here; confirm against the provider implementations.
	expires: number;
	[key: string]: unknown;
};
/** Free-form string identifying an OAuth provider. */
export type OAuthProviderId = string;
/** @deprecated Use OAuthProviderId instead */
export type OAuthProvider = OAuthProviderId;
/** A request for user input during login, consumed by OAuthLoginCallbacks.onPrompt. */
export type OAuthPrompt = {
	// Text shown to the user describing what to enter.
	message: string;
	placeholder?: string;
	// When true, an empty response is acceptable.
	allowEmpty?: boolean;
};
/** Payload for OAuthLoginCallbacks.onAuth: the URL the user must open to authorize. */
export type OAuthAuthInfo = {
	url: string;
	instructions?: string;
};
/** Host-supplied callbacks that drive an interactive OAuth login flow. */
export interface OAuthLoginCallbacks {
	/** Present the authorization URL (and optional instructions) to the user. */
	onAuth: (info: OAuthAuthInfo) => void;
	/** Ask the user for a value (e.g. an authorization code); resolves with their input. */
	onPrompt: (prompt: OAuthPrompt) => Promise<string>;
	/** Optional progress messages emitted while the flow runs. */
	onProgress?: (message: string) => void;
	/** Optional manual code entry path (see OAuthProviderInterface.usesCallbackServer). */
	onManualCodeInput?: () => Promise<string>;
	/** Optional signal for aborting the login flow. */
	signal?: AbortSignal;
}
/** Contract implemented by each OAuth provider integration. */
export interface OAuthProviderInterface {
	readonly id: OAuthProviderId;
	/** Human-readable provider name. */
	readonly name: string;
	/** Run the login flow, return credentials to persist */
	login(callbacks: OAuthLoginCallbacks): Promise<OAuthCredentials>;
	/** Whether login uses a local callback server and supports manual code input. */
	usesCallbackServer?: boolean;
	/** Refresh expired credentials, return updated credentials to persist */
	refreshToken(credentials: OAuthCredentials): Promise<OAuthCredentials>;
	/** Convert credentials to API key string for the provider */
	getApiKey(credentials: OAuthCredentials): string;
	/** Optional: modify models for this provider (e.g., update baseUrl) */
	modifyModels?(models: Model<Api>[], credentials: OAuthCredentials): Model<Api>[];
}
/** @deprecated Use OAuthProviderInterface instead */
export interface OAuthProviderInfo {
	id: OAuthProviderId;
	name: string;
	available: boolean;
}
//# sourceMappingURL=types.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../../src/utils/oauth/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,GAAG,EAAE,KAAK,EAAE,MAAM,gBAAgB,CAAC;AAEjD,MAAM,MAAM,gBAAgB,GAAG;IAC9B,OAAO,EAAE,MAAM,CAAC;IAChB,MAAM,EAAE,MAAM,CAAC;IACf,OAAO,EAAE,MAAM,CAAC;IAChB,CAAC,GAAG,EAAE,MAAM,GAAG,OAAO,CAAC;CACvB,CAAC;AAEF,MAAM,MAAM,eAAe,GAAG,MAAM,CAAC;AAErC,8CAA8C;AAC9C,MAAM,MAAM,aAAa,GAAG,eAAe,CAAC;AAE5C,MAAM,MAAM,WAAW,GAAG;IACzB,OAAO,EAAE,MAAM,CAAC;IAChB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,UAAU,CAAC,EAAE,OAAO,CAAC;CACrB,CAAC;AAEF,MAAM,MAAM,aAAa,GAAG;IAC3B,GAAG,EAAE,MAAM,CAAC;IACZ,YAAY,CAAC,EAAE,MAAM,CAAC;CACtB,CAAC;AAEF,MAAM,WAAW,mBAAmB;IACnC,MAAM,EAAE,CAAC,IAAI,EAAE,aAAa,KAAK,IAAI,CAAC;IACtC,QAAQ,EAAE,CAAC,MAAM,EAAE,WAAW,KAAK,OAAO,CAAC,MAAM,CAAC,CAAC;IACnD,UAAU,CAAC,EAAE,CAAC,OAAO,EAAE,MAAM,KAAK,IAAI,CAAC;IACvC,iBAAiB,CAAC,EAAE,MAAM,OAAO,CAAC,MAAM,CAAC,CAAC;IAC1C,MAAM,CAAC,EAAE,WAAW,CAAC;CACrB;AAED,MAAM,WAAW,sBAAsB;IACtC,QAAQ,CAAC,EAAE,EAAE,eAAe,CAAC;IAC7B,QAAQ,CAAC,IAAI,EAAE,MAAM,CAAC;IAEtB,wDAAwD;IACxD,KAAK,CAAC,SAAS,EAAE,mBAAmB,GAAG,OAAO,CAAC,gBAAgB,CAAC,CAAC;IAEjE,iFAAiF;IACjF,kBAAkB,CAAC,EAAE,OAAO,CAAC;IAE7B,yEAAyE;IACzE,YAAY,CAAC,WAAW,EAAE,gBAAgB,GAAG,OAAO,CAAC,gBAAgB,CAAC,CAAC;IAEvE,6DAA6D;IAC7D,SAAS,CAAC,WAAW,EAAE,gBAAgB,GAAG,MAAM,CAAC;IAEjD,uEAAuE;IACvE,YAAY,CAAC,CAAC,MAAM,EAAE,KAAK,CAAC,GAAG,CAAC,EAAE,EAAE,WAAW,EAAE,gBAAgB,GAAG,KAAK,CAAC,GAAG,CAAC,EAAE,CAAC;CACjF;AAED,qDAAqD;AACrD,MAAM,WAAW,iBAAiB;IACjC,EAAE,EAAE,eAAe,CAAC;IACpB,IAAI,EAAE,MAAM,CAAC;IACb,SAAS,EAAE,OAAO,CAAC;CACnB","sourcesContent":["import type { Api, Model } from \"../../types.js\";\n\nexport type OAuthCredentials = {\n\trefresh: string;\n\taccess: string;\n\texpires: number;\n\t[key: string]: unknown;\n};\n\nexport type OAuthProviderId = string;\n\n/** @deprecated Use OAuthProviderId instead */\nexport type OAuthProvider = OAuthProviderId;\n\nexport type OAuthPrompt = {\n\tmessage: string;\n\tplaceholder?: string;\n\tallowEmpty?: boolean;\n};\n\nexport type OAuthAuthInfo = 
{\n\turl: string;\n\tinstructions?: string;\n};\n\nexport interface OAuthLoginCallbacks {\n\tonAuth: (info: OAuthAuthInfo) => void;\n\tonPrompt: (prompt: OAuthPrompt) => Promise<string>;\n\tonProgress?: (message: string) => void;\n\tonManualCodeInput?: () => Promise<string>;\n\tsignal?: AbortSignal;\n}\n\nexport interface OAuthProviderInterface {\n\treadonly id: OAuthProviderId;\n\treadonly name: string;\n\n\t/** Run the login flow, return credentials to persist */\n\tlogin(callbacks: OAuthLoginCallbacks): Promise<OAuthCredentials>;\n\n\t/** Whether login uses a local callback server and supports manual code input. */\n\tusesCallbackServer?: boolean;\n\n\t/** Refresh expired credentials, return updated credentials to persist */\n\trefreshToken(credentials: OAuthCredentials): Promise<OAuthCredentials>;\n\n\t/** Convert credentials to API key string for the provider */\n\tgetApiKey(credentials: OAuthCredentials): string;\n\n\t/** Optional: modify models for this provider (e.g., update baseUrl) */\n\tmodifyModels?(models: Model<Api>[], credentials: OAuthCredentials): Model<Api>[];\n}\n\n/** @deprecated Use OAuthProviderInterface instead */\nexport interface OAuthProviderInfo {\n\tid: OAuthProviderId;\n\tname: string;\n\tavailable: boolean;\n}\n"]}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"types.js","sourceRoot":"","sources":["../../../src/utils/oauth/types.ts"],"names":[],"mappings":"","sourcesContent":["import type { Api, Model } from \"../../types.js\";\n\nexport type OAuthCredentials = {\n\trefresh: string;\n\taccess: string;\n\texpires: number;\n\t[key: string]: unknown;\n};\n\nexport type OAuthProviderId = string;\n\n/** @deprecated Use OAuthProviderId instead */\nexport type OAuthProvider = OAuthProviderId;\n\nexport type OAuthPrompt = {\n\tmessage: string;\n\tplaceholder?: string;\n\tallowEmpty?: boolean;\n};\n\nexport type OAuthAuthInfo = {\n\turl: string;\n\tinstructions?: string;\n};\n\nexport interface OAuthLoginCallbacks {\n\tonAuth: (info: OAuthAuthInfo) => void;\n\tonPrompt: (prompt: OAuthPrompt) => Promise<string>;\n\tonProgress?: (message: string) => void;\n\tonManualCodeInput?: () => Promise<string>;\n\tsignal?: AbortSignal;\n}\n\nexport interface OAuthProviderInterface {\n\treadonly id: OAuthProviderId;\n\treadonly name: string;\n\n\t/** Run the login flow, return credentials to persist */\n\tlogin(callbacks: OAuthLoginCallbacks): Promise<OAuthCredentials>;\n\n\t/** Whether login uses a local callback server and supports manual code input. */\n\tusesCallbackServer?: boolean;\n\n\t/** Refresh expired credentials, return updated credentials to persist */\n\trefreshToken(credentials: OAuthCredentials): Promise<OAuthCredentials>;\n\n\t/** Convert credentials to API key string for the provider */\n\tgetApiKey(credentials: OAuthCredentials): string;\n\n\t/** Optional: modify models for this provider (e.g., update baseUrl) */\n\tmodifyModels?(models: Model<Api>[], credentials: OAuthCredentials): Model<Api>[];\n}\n\n/** @deprecated Use OAuthProviderInterface instead */\nexport interface OAuthProviderInfo {\n\tid: OAuthProviderId;\n\tname: string;\n\tavailable: boolean;\n}\n"]}
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
import type { AssistantMessage } from "../types.js";
/**
 * Check if an assistant message represents a context overflow error.
 *
 * This handles two cases:
 * 1. Error-based overflow: Most providers return stopReason "error" with a
 * specific error message pattern.
 * 2. Silent overflow: Some providers accept overflow requests and return
 * successfully. For these, we check if usage.input exceeds the context window.
 *
 * ## Reliability by Provider
 *
 * **Reliable detection (returns error with detectable message):**
 * - Anthropic: "prompt is too long: X tokens > Y maximum"
 * - OpenAI (Completions & Responses): "exceeds the context window"
 * - Google Gemini: "input token count exceeds the maximum"
 * - xAI (Grok): "maximum prompt length is X but request contains Y"
 * - Groq: "reduce the length of the messages"
 * - Cerebras: 400/413 status code (no body)
 * - Mistral: "Prompt contains X tokens ... too large for model with Y maximum context length"
 * - OpenRouter (all backends): "maximum context length is X tokens"
 * - llama.cpp: "exceeds the available context size"
 * - LM Studio: "greater than the context length"
 * - Kimi For Coding: "exceeded model token limit: X (requested: Y)"
 *
 * **Unreliable detection:**
 * - z.ai: Sometimes accepts overflow silently (detectable via usage.input > contextWindow),
 * sometimes returns rate limit errors. Pass contextWindow param to detect silent overflow.
 * - Ollama: May truncate input silently for some setups, but may also return explicit
 * overflow errors that match the patterns above. Silent truncation still cannot be
 * detected here because we do not know the expected token count.
 *
 * ## Custom Providers
 *
 * If you've added custom models via settings.json, this function may not detect
 * overflow errors from those providers. To add support:
 *
 * 1. Send a request that exceeds the model's context window
 * 2. Check the errorMessage in the response
 * 3. Create a regex pattern that matches the error
 * 4. The pattern should be added to OVERFLOW_PATTERNS in this file, or
 * check the errorMessage yourself before calling this function
 *
 * All message patterns are matched case-insensitively against errorMessage.
 *
 * @param message - The assistant message to check
 * @param contextWindow - Optional context window size for detecting silent overflow (z.ai)
 * @returns true if the message indicates a context overflow
 */
export declare function isContextOverflow(message: AssistantMessage, contextWindow?: number): boolean;
/**
 * Get the overflow patterns for testing purposes.
 * Returns a copy of the internal list; mutating it does not affect detection.
 */
export declare function getOverflowPatterns(): RegExp[];
//# sourceMappingURL=overflow.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"overflow.d.ts","sourceRoot":"","sources":["../../src/utils/overflow.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,aAAa,CAAC;AA+CpD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA6CG;AACH,wBAAgB,iBAAiB,CAAC,OAAO,EAAE,gBAAgB,EAAE,aAAa,CAAC,EAAE,MAAM,GAAG,OAAO,CAwB5F;AAED;;GAEG;AACH,wBAAgB,mBAAmB,IAAI,MAAM,EAAE,CAE9C","sourcesContent":["import type { AssistantMessage } from \"../types.js\";\n\n/**\n * Regex patterns to detect context overflow errors from different providers.\n *\n * These patterns match error messages returned when the input exceeds\n * the model's context window.\n *\n * Provider-specific patterns (with example error messages):\n *\n * - Anthropic: \"prompt is too long: 213462 tokens > 200000 maximum\"\n * - OpenAI: \"Your input exceeds the context window of this model\"\n * - Google: \"The input token count (1196265) exceeds the maximum number of tokens allowed (1048575)\"\n * - xAI: \"This model's maximum prompt length is 131072 but the request contains 537812 tokens\"\n * - Groq: \"Please reduce the length of the messages or completion\"\n * - OpenRouter: \"This endpoint's maximum context length is X tokens. However, you requested about Y tokens\"\n * - llama.cpp: \"the request exceeds the available context size, try increasing it\"\n * - LM Studio: \"tokens to keep from the initial prompt is greater than the context length\"\n * - GitHub Copilot: \"prompt token count of X exceeds the limit of Y\"\n * - MiniMax: \"invalid params, context window exceeds limit\"\n * - Kimi For Coding: \"Your request exceeded model token limit: X (requested: Y)\"\n * - Cerebras: Returns \"400/413 status code (no body)\" - handled separately below\n * - Mistral: \"Prompt contains X tokens ... 
too large for model with Y maximum context length\"\n * - z.ai: Does NOT error, accepts overflow silently - handled via usage.input > contextWindow\n * - Ollama: Some deployments truncate silently, others return errors like \"prompt too long; exceeded max context length by X tokens\"\n */\nconst OVERFLOW_PATTERNS = [\n\t/prompt is too long/i, // Anthropic\n\t/input is too long for requested model/i, // Amazon Bedrock\n\t/exceeds the context window/i, // OpenAI (Completions & Responses API)\n\t/input token count.*exceeds the maximum/i, // Google (Gemini)\n\t/maximum prompt length is \\d+/i, // xAI (Grok)\n\t/reduce the length of the messages/i, // Groq\n\t/maximum context length is \\d+ tokens/i, // OpenRouter (all backends)\n\t/exceeds the limit of \\d+/i, // GitHub Copilot\n\t/exceeds the available context size/i, // llama.cpp server\n\t/greater than the context length/i, // LM Studio\n\t/context window exceeds limit/i, // MiniMax\n\t/exceeded model token limit/i, // Kimi For Coding\n\t/too large for model with \\d+ maximum context length/i, // Mistral\n\t/model_context_window_exceeded/i, // z.ai non-standard finish_reason surfaced as error text\n\t/prompt too long; exceeded (?:max )?context length/i, // Ollama explicit overflow error\n\t/context[_ ]length[_ ]exceeded/i, // Generic fallback\n\t/too many tokens/i, // Generic fallback\n\t/token limit exceeded/i, // Generic fallback\n];\n\n/**\n * Check if an assistant message represents a context overflow error.\n *\n * This handles two cases:\n * 1. Error-based overflow: Most providers return stopReason \"error\" with a\n * specific error message pattern.\n * 2. Silent overflow: Some providers accept overflow requests and return\n * successfully. 
For these, we check if usage.input exceeds the context window.\n *\n * ## Reliability by Provider\n *\n * **Reliable detection (returns error with detectable message):**\n * - Anthropic: \"prompt is too long: X tokens > Y maximum\"\n * - OpenAI (Completions & Responses): \"exceeds the context window\"\n * - Google Gemini: \"input token count exceeds the maximum\"\n * - xAI (Grok): \"maximum prompt length is X but request contains Y\"\n * - Groq: \"reduce the length of the messages\"\n * - Cerebras: 400/413 status code (no body)\n * - Mistral: \"Prompt contains X tokens ... too large for model with Y maximum context length\"\n * - OpenRouter (all backends): \"maximum context length is X tokens\"\n * - llama.cpp: \"exceeds the available context size\"\n * - LM Studio: \"greater than the context length\"\n * - Kimi For Coding: \"exceeded model token limit: X (requested: Y)\"\n *\n * **Unreliable detection:**\n * - z.ai: Sometimes accepts overflow silently (detectable via usage.input > contextWindow),\n * sometimes returns rate limit errors. Pass contextWindow param to detect silent overflow.\n * - Ollama: May truncate input silently for some setups, but may also return explicit\n * overflow errors that match the patterns above. Silent truncation still cannot be\n * detected here because we do not know the expected token count.\n *\n * ## Custom Providers\n *\n * If you've added custom models via settings.json, this function may not detect\n * overflow errors from those providers. To add support:\n *\n * 1. Send a request that exceeds the model's context window\n * 2. Check the errorMessage in the response\n * 3. Create a regex pattern that matches the error\n * 4. 
The pattern should be added to OVERFLOW_PATTERNS in this file, or\n * check the errorMessage yourself before calling this function\n *\n * @param message - The assistant message to check\n * @param contextWindow - Optional context window size for detecting silent overflow (z.ai)\n * @returns true if the message indicates a context overflow\n */\nexport function isContextOverflow(message: AssistantMessage, contextWindow?: number): boolean {\n\t// Case 1: Check error message patterns\n\tif (message.stopReason === \"error\" && message.errorMessage) {\n\t\t// Check known patterns\n\t\tif (OVERFLOW_PATTERNS.some((p) => p.test(message.errorMessage!))) {\n\t\t\treturn true;\n\t\t}\n\n\t\t// Cerebras returns 400/413 with no body for context overflow\n\t\t// Note: 429 is rate limiting (requests/tokens per time), NOT context overflow\n\t\tif (/^4(00|13)\\s*(status code)?\\s*\\(no body\\)/i.test(message.errorMessage)) {\n\t\t\treturn true;\n\t\t}\n\t}\n\n\t// Case 2: Silent overflow (z.ai style) - successful but usage exceeds context\n\tif (contextWindow && message.stopReason === \"stop\") {\n\t\tconst inputTokens = message.usage.input + message.usage.cacheRead;\n\t\tif (inputTokens > contextWindow) {\n\t\t\treturn true;\n\t\t}\n\t}\n\n\treturn false;\n}\n\n/**\n * Get the overflow patterns for testing purposes.\n */\nexport function getOverflowPatterns(): RegExp[] {\n\treturn [...OVERFLOW_PATTERNS];\n}\n"]}
|
|
@@ -0,0 +1,119 @@
|
|
|
1
|
+
/**
 * Regex patterns to detect context overflow errors from different providers.
 *
 * These patterns match error messages returned when the input exceeds
 * the model's context window. All patterns are case-insensitive; they are
 * tested against the assistant message's errorMessage text.
 *
 * Provider-specific patterns (with example error messages):
 *
 * - Anthropic: "prompt is too long: 213462 tokens > 200000 maximum"
 * - OpenAI: "Your input exceeds the context window of this model"
 * - Google: "The input token count (1196265) exceeds the maximum number of tokens allowed (1048575)"
 * - xAI: "This model's maximum prompt length is 131072 but the request contains 537812 tokens"
 * - Groq: "Please reduce the length of the messages or completion"
 * - OpenRouter: "This endpoint's maximum context length is X tokens. However, you requested about Y tokens"
 * - llama.cpp: "the request exceeds the available context size, try increasing it"
 * - LM Studio: "tokens to keep from the initial prompt is greater than the context length"
 * - GitHub Copilot: "prompt token count of X exceeds the limit of Y"
 * - MiniMax: "invalid params, context window exceeds limit"
 * - Kimi For Coding: "Your request exceeded model token limit: X (requested: Y)"
 * - Cerebras: Returns "400/413 status code (no body)" - handled separately below
 * - Mistral: "Prompt contains X tokens ... too large for model with Y maximum context length"
 * - z.ai: Does NOT error, accepts overflow silently - handled via usage.input > contextWindow
 * - Ollama: Some deployments truncate silently, others return errors like "prompt too long; exceeded max context length by X tokens"
 */
const OVERFLOW_PATTERNS = [
    /prompt is too long/i, // Anthropic
    /input is too long for requested model/i, // Amazon Bedrock
    /exceeds the context window/i, // OpenAI (Completions & Responses API)
    /input token count.*exceeds the maximum/i, // Google (Gemini)
    /maximum prompt length is \d+/i, // xAI (Grok)
    /reduce the length of the messages/i, // Groq
    /maximum context length is \d+ tokens/i, // OpenRouter (all backends)
    /exceeds the limit of \d+/i, // GitHub Copilot
    /exceeds the available context size/i, // llama.cpp server
    /greater than the context length/i, // LM Studio
    /context window exceeds limit/i, // MiniMax
    /exceeded model token limit/i, // Kimi For Coding
    /too large for model with \d+ maximum context length/i, // Mistral
    /model_context_window_exceeded/i, // z.ai non-standard finish_reason surfaced as error text
    /prompt too long; exceeded (?:max )?context length/i, // Ollama explicit overflow error
    /context[_ ]length[_ ]exceeded/i, // Generic fallback
    /too many tokens/i, // Generic fallback
    /token limit exceeded/i, // Generic fallback
];
|
|
45
|
+
/**
|
|
46
|
+
* Check if an assistant message represents a context overflow error.
|
|
47
|
+
*
|
|
48
|
+
* This handles two cases:
|
|
49
|
+
* 1. Error-based overflow: Most providers return stopReason "error" with a
|
|
50
|
+
* specific error message pattern.
|
|
51
|
+
* 2. Silent overflow: Some providers accept overflow requests and return
|
|
52
|
+
* successfully. For these, we check if usage.input exceeds the context window.
|
|
53
|
+
*
|
|
54
|
+
* ## Reliability by Provider
|
|
55
|
+
*
|
|
56
|
+
* **Reliable detection (returns error with detectable message):**
|
|
57
|
+
* - Anthropic: "prompt is too long: X tokens > Y maximum"
|
|
58
|
+
* - OpenAI (Completions & Responses): "exceeds the context window"
|
|
59
|
+
* - Google Gemini: "input token count exceeds the maximum"
|
|
60
|
+
* - xAI (Grok): "maximum prompt length is X but request contains Y"
|
|
61
|
+
* - Groq: "reduce the length of the messages"
|
|
62
|
+
* - Cerebras: 400/413 status code (no body)
|
|
63
|
+
* - Mistral: "Prompt contains X tokens ... too large for model with Y maximum context length"
|
|
64
|
+
* - OpenRouter (all backends): "maximum context length is X tokens"
|
|
65
|
+
* - llama.cpp: "exceeds the available context size"
|
|
66
|
+
* - LM Studio: "greater than the context length"
|
|
67
|
+
* - Kimi For Coding: "exceeded model token limit: X (requested: Y)"
|
|
68
|
+
*
|
|
69
|
+
* **Unreliable detection:**
|
|
70
|
+
* - z.ai: Sometimes accepts overflow silently (detectable via usage.input > contextWindow),
|
|
71
|
+
* sometimes returns rate limit errors. Pass contextWindow param to detect silent overflow.
|
|
72
|
+
* - Ollama: May truncate input silently for some setups, but may also return explicit
|
|
73
|
+
* overflow errors that match the patterns above. Silent truncation still cannot be
|
|
74
|
+
* detected here because we do not know the expected token count.
|
|
75
|
+
*
|
|
76
|
+
* ## Custom Providers
|
|
77
|
+
*
|
|
78
|
+
* If you've added custom models via settings.json, this function may not detect
|
|
79
|
+
* overflow errors from those providers. To add support:
|
|
80
|
+
*
|
|
81
|
+
* 1. Send a request that exceeds the model's context window
|
|
82
|
+
* 2. Check the errorMessage in the response
|
|
83
|
+
* 3. Create a regex pattern that matches the error
|
|
84
|
+
* 4. The pattern should be added to OVERFLOW_PATTERNS in this file, or
|
|
85
|
+
* check the errorMessage yourself before calling this function
|
|
86
|
+
*
|
|
87
|
+
* @param message - The assistant message to check
|
|
88
|
+
* @param contextWindow - Optional context window size for detecting silent overflow (z.ai)
|
|
89
|
+
* @returns true if the message indicates a context overflow
|
|
90
|
+
*/
|
|
91
|
+
export function isContextOverflow(message, contextWindow) {
|
|
92
|
+
// Case 1: Check error message patterns
|
|
93
|
+
if (message.stopReason === "error" && message.errorMessage) {
|
|
94
|
+
// Check known patterns
|
|
95
|
+
if (OVERFLOW_PATTERNS.some((p) => p.test(message.errorMessage))) {
|
|
96
|
+
return true;
|
|
97
|
+
}
|
|
98
|
+
// Cerebras returns 400/413 with no body for context overflow
|
|
99
|
+
// Note: 429 is rate limiting (requests/tokens per time), NOT context overflow
|
|
100
|
+
if (/^4(00|13)\s*(status code)?\s*\(no body\)/i.test(message.errorMessage)) {
|
|
101
|
+
return true;
|
|
102
|
+
}
|
|
103
|
+
}
|
|
104
|
+
// Case 2: Silent overflow (z.ai style) - successful but usage exceeds context
|
|
105
|
+
if (contextWindow && message.stopReason === "stop") {
|
|
106
|
+
const inputTokens = message.usage.input + message.usage.cacheRead;
|
|
107
|
+
if (inputTokens > contextWindow) {
|
|
108
|
+
return true;
|
|
109
|
+
}
|
|
110
|
+
}
|
|
111
|
+
return false;
|
|
112
|
+
}
|
|
113
|
+
/**
|
|
114
|
+
* Get the overflow patterns for testing purposes.
|
|
115
|
+
*/
|
|
116
|
+
export function getOverflowPatterns() {
|
|
117
|
+
return [...OVERFLOW_PATTERNS];
|
|
118
|
+
}
|
|
119
|
+
//# sourceMappingURL=overflow.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"overflow.js","sourceRoot":"","sources":["../../src/utils/overflow.ts"],"names":[],"mappings":"AAEA;;;;;;;;;;;;;;;;;;;;;;;GAuBG;AACH,MAAM,iBAAiB,GAAG;IACzB,qBAAqB,EAAE,YAAY;IACnC,wCAAwC,EAAE,iBAAiB;IAC3D,6BAA6B,EAAE,uCAAuC;IACtE,yCAAyC,EAAE,kBAAkB;IAC7D,+BAA+B,EAAE,aAAa;IAC9C,oCAAoC,EAAE,OAAO;IAC7C,uCAAuC,EAAE,4BAA4B;IACrE,2BAA2B,EAAE,iBAAiB;IAC9C,qCAAqC,EAAE,mBAAmB;IAC1D,kCAAkC,EAAE,YAAY;IAChD,+BAA+B,EAAE,UAAU;IAC3C,6BAA6B,EAAE,kBAAkB;IACjD,sDAAsD,EAAE,UAAU;IAClE,gCAAgC,EAAE,yDAAyD;IAC3F,oDAAoD,EAAE,iCAAiC;IACvF,gCAAgC,EAAE,mBAAmB;IACrD,kBAAkB,EAAE,mBAAmB;IACvC,uBAAuB,EAAE,mBAAmB;CAC5C,CAAC;AAEF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA6CG;AACH,MAAM,UAAU,iBAAiB,CAAC,OAAyB,EAAE,aAAsB,EAAW;IAC7F,uCAAuC;IACvC,IAAI,OAAO,CAAC,UAAU,KAAK,OAAO,IAAI,OAAO,CAAC,YAAY,EAAE,CAAC;QAC5D,uBAAuB;QACvB,IAAI,iBAAiB,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,OAAO,CAAC,YAAa,CAAC,CAAC,EAAE,CAAC;YAClE,OAAO,IAAI,CAAC;QACb,CAAC;QAED,6DAA6D;QAC7D,8EAA8E;QAC9E,IAAI,2CAA2C,CAAC,IAAI,CAAC,OAAO,CAAC,YAAY,CAAC,EAAE,CAAC;YAC5E,OAAO,IAAI,CAAC;QACb,CAAC;IACF,CAAC;IAED,8EAA8E;IAC9E,IAAI,aAAa,IAAI,OAAO,CAAC,UAAU,KAAK,MAAM,EAAE,CAAC;QACpD,MAAM,WAAW,GAAG,OAAO,CAAC,KAAK,CAAC,KAAK,GAAG,OAAO,CAAC,KAAK,CAAC,SAAS,CAAC;QAClE,IAAI,WAAW,GAAG,aAAa,EAAE,CAAC;YACjC,OAAO,IAAI,CAAC;QACb,CAAC;IACF,CAAC;IAED,OAAO,KAAK,CAAC;AAAA,CACb;AAED;;GAEG;AACH,MAAM,UAAU,mBAAmB,GAAa;IAC/C,OAAO,CAAC,GAAG,iBAAiB,CAAC,CAAC;AAAA,CAC9B","sourcesContent":["import type { AssistantMessage } from \"../types.js\";\n\n/**\n * Regex patterns to detect context overflow errors from different providers.\n *\n * These patterns match error messages returned when the input exceeds\n * the model's context window.\n *\n * Provider-specific patterns (with example error messages):\n *\n * - Anthropic: \"prompt is too long: 213462 tokens > 200000 maximum\"\n * - OpenAI: \"Your input exceeds the context window of this model\"\n * - Google: \"The input token count (1196265) exceeds the maximum number of tokens 
allowed (1048575)\"\n * - xAI: \"This model's maximum prompt length is 131072 but the request contains 537812 tokens\"\n * - Groq: \"Please reduce the length of the messages or completion\"\n * - OpenRouter: \"This endpoint's maximum context length is X tokens. However, you requested about Y tokens\"\n * - llama.cpp: \"the request exceeds the available context size, try increasing it\"\n * - LM Studio: \"tokens to keep from the initial prompt is greater than the context length\"\n * - GitHub Copilot: \"prompt token count of X exceeds the limit of Y\"\n * - MiniMax: \"invalid params, context window exceeds limit\"\n * - Kimi For Coding: \"Your request exceeded model token limit: X (requested: Y)\"\n * - Cerebras: Returns \"400/413 status code (no body)\" - handled separately below\n * - Mistral: \"Prompt contains X tokens ... too large for model with Y maximum context length\"\n * - z.ai: Does NOT error, accepts overflow silently - handled via usage.input > contextWindow\n * - Ollama: Some deployments truncate silently, others return errors like \"prompt too long; exceeded max context length by X tokens\"\n */\nconst OVERFLOW_PATTERNS = [\n\t/prompt is too long/i, // Anthropic\n\t/input is too long for requested model/i, // Amazon Bedrock\n\t/exceeds the context window/i, // OpenAI (Completions & Responses API)\n\t/input token count.*exceeds the maximum/i, // Google (Gemini)\n\t/maximum prompt length is \\d+/i, // xAI (Grok)\n\t/reduce the length of the messages/i, // Groq\n\t/maximum context length is \\d+ tokens/i, // OpenRouter (all backends)\n\t/exceeds the limit of \\d+/i, // GitHub Copilot\n\t/exceeds the available context size/i, // llama.cpp server\n\t/greater than the context length/i, // LM Studio\n\t/context window exceeds limit/i, // MiniMax\n\t/exceeded model token limit/i, // Kimi For Coding\n\t/too large for model with \\d+ maximum context length/i, // Mistral\n\t/model_context_window_exceeded/i, // z.ai non-standard finish_reason surfaced as error 
text\n\t/prompt too long; exceeded (?:max )?context length/i, // Ollama explicit overflow error\n\t/context[_ ]length[_ ]exceeded/i, // Generic fallback\n\t/too many tokens/i, // Generic fallback\n\t/token limit exceeded/i, // Generic fallback\n];\n\n/**\n * Check if an assistant message represents a context overflow error.\n *\n * This handles two cases:\n * 1. Error-based overflow: Most providers return stopReason \"error\" with a\n * specific error message pattern.\n * 2. Silent overflow: Some providers accept overflow requests and return\n * successfully. For these, we check if usage.input exceeds the context window.\n *\n * ## Reliability by Provider\n *\n * **Reliable detection (returns error with detectable message):**\n * - Anthropic: \"prompt is too long: X tokens > Y maximum\"\n * - OpenAI (Completions & Responses): \"exceeds the context window\"\n * - Google Gemini: \"input token count exceeds the maximum\"\n * - xAI (Grok): \"maximum prompt length is X but request contains Y\"\n * - Groq: \"reduce the length of the messages\"\n * - Cerebras: 400/413 status code (no body)\n * - Mistral: \"Prompt contains X tokens ... too large for model with Y maximum context length\"\n * - OpenRouter (all backends): \"maximum context length is X tokens\"\n * - llama.cpp: \"exceeds the available context size\"\n * - LM Studio: \"greater than the context length\"\n * - Kimi For Coding: \"exceeded model token limit: X (requested: Y)\"\n *\n * **Unreliable detection:**\n * - z.ai: Sometimes accepts overflow silently (detectable via usage.input > contextWindow),\n * sometimes returns rate limit errors. Pass contextWindow param to detect silent overflow.\n * - Ollama: May truncate input silently for some setups, but may also return explicit\n * overflow errors that match the patterns above. 
Silent truncation still cannot be\n * detected here because we do not know the expected token count.\n *\n * ## Custom Providers\n *\n * If you've added custom models via settings.json, this function may not detect\n * overflow errors from those providers. To add support:\n *\n * 1. Send a request that exceeds the model's context window\n * 2. Check the errorMessage in the response\n * 3. Create a regex pattern that matches the error\n * 4. The pattern should be added to OVERFLOW_PATTERNS in this file, or\n * check the errorMessage yourself before calling this function\n *\n * @param message - The assistant message to check\n * @param contextWindow - Optional context window size for detecting silent overflow (z.ai)\n * @returns true if the message indicates a context overflow\n */\nexport function isContextOverflow(message: AssistantMessage, contextWindow?: number): boolean {\n\t// Case 1: Check error message patterns\n\tif (message.stopReason === \"error\" && message.errorMessage) {\n\t\t// Check known patterns\n\t\tif (OVERFLOW_PATTERNS.some((p) => p.test(message.errorMessage!))) {\n\t\t\treturn true;\n\t\t}\n\n\t\t// Cerebras returns 400/413 with no body for context overflow\n\t\t// Note: 429 is rate limiting (requests/tokens per time), NOT context overflow\n\t\tif (/^4(00|13)\\s*(status code)?\\s*\\(no body\\)/i.test(message.errorMessage)) {\n\t\t\treturn true;\n\t\t}\n\t}\n\n\t// Case 2: Silent overflow (z.ai style) - successful but usage exceeds context\n\tif (contextWindow && message.stopReason === \"stop\") {\n\t\tconst inputTokens = message.usage.input + message.usage.cacheRead;\n\t\tif (inputTokens > contextWindow) {\n\t\t\treturn true;\n\t\t}\n\t}\n\n\treturn false;\n}\n\n/**\n * Get the overflow patterns for testing purposes.\n */\nexport function getOverflowPatterns(): RegExp[] {\n\treturn [...OVERFLOW_PATTERNS];\n}\n"]}
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
/**
 * Removes unpaired Unicode surrogate code units from a string.
 *
 * A high surrogate (0xD800-0xDBFF) is only valid when immediately followed by a
 * low surrogate (0xDC00-0xDFFF), and vice versa. A lone half of such a pair
 * cannot be serialized to well-formed UTF-8/JSON and causes serialization
 * errors in many API providers.
 *
 * Valid emoji and other characters outside the Basic Multilingual Plane use
 * properly paired surrogates and are NOT affected by this function.
 *
 * @param text - The text to sanitize
 * @returns The sanitized text with every unpaired surrogate removed
 *
 * @example
 * // Valid emoji (properly paired surrogates) are preserved
 * sanitizeSurrogates("Hello 🙈 World") // => "Hello 🙈 World"
 *
 * // Unpaired high surrogate is removed
 * const unpaired = String.fromCharCode(0xD83D); // high surrogate without low
 * sanitizeSurrogates(`Text ${unpaired} here`) // => "Text  here"
 */
export declare function sanitizeSurrogates(text: string): string;
|
|
22
|
+
//# sourceMappingURL=sanitize-unicode.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"sanitize-unicode.d.ts","sourceRoot":"","sources":["../../src/utils/sanitize-unicode.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;;;;GAmBG;AACH,wBAAgB,kBAAkB,CAAC,IAAI,EAAE,MAAM,GAAG,MAAM,CAIvD","sourcesContent":["/**\n * Removes unpaired Unicode surrogate characters from a string.\n *\n * Unpaired surrogates (high surrogates 0xD800-0xDBFF without matching low surrogates 0xDC00-0xDFFF,\n * or vice versa) cause JSON serialization errors in many API providers.\n *\n * Valid emoji and other characters outside the Basic Multilingual Plane use properly paired\n * surrogates and will NOT be affected by this function.\n *\n * @param text - The text to sanitize\n * @returns The sanitized text with unpaired surrogates removed\n *\n * @example\n * // Valid emoji (properly paired surrogates) are preserved\n * sanitizeSurrogates(\"Hello 🙈 World\") // => \"Hello 🙈 World\"\n *\n * // Unpaired high surrogate is removed\n * const unpaired = String.fromCharCode(0xD83D); // high surrogate without low\n * sanitizeSurrogates(`Text ${unpaired} here`) // => \"Text here\"\n */\nexport function sanitizeSurrogates(text: string): string {\n\t// Replace unpaired high surrogates (0xD800-0xDBFF not followed by low surrogate)\n\t// Replace unpaired low surrogates (0xDC00-0xDFFF not preceded by high surrogate)\n\treturn text.replace(/[\\uD800-\\uDBFF](?![\\uDC00-\\uDFFF])|(?<![\\uD800-\\uDBFF])[\\uDC00-\\uDFFF]/g, \"\");\n}\n"]}
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Removes unpaired Unicode surrogate characters from a string.
|
|
3
|
+
*
|
|
4
|
+
* Unpaired surrogates (high surrogates 0xD800-0xDBFF without matching low surrogates 0xDC00-0xDFFF,
|
|
5
|
+
* or vice versa) cause JSON serialization errors in many API providers.
|
|
6
|
+
*
|
|
7
|
+
* Valid emoji and other characters outside the Basic Multilingual Plane use properly paired
|
|
8
|
+
* surrogates and will NOT be affected by this function.
|
|
9
|
+
*
|
|
10
|
+
* @param text - The text to sanitize
|
|
11
|
+
* @returns The sanitized text with unpaired surrogates removed
|
|
12
|
+
*
|
|
13
|
+
* @example
|
|
14
|
+
* // Valid emoji (properly paired surrogates) are preserved
|
|
15
|
+
* sanitizeSurrogates("Hello 🙈 World") // => "Hello 🙈 World"
|
|
16
|
+
*
|
|
17
|
+
* // Unpaired high surrogate is removed
|
|
18
|
+
* const unpaired = String.fromCharCode(0xD83D); // high surrogate without low
|
|
19
|
+
* sanitizeSurrogates(`Text ${unpaired} here`) // => "Text here"
|
|
20
|
+
*/
|
|
21
|
+
export function sanitizeSurrogates(text) {
|
|
22
|
+
// Replace unpaired high surrogates (0xD800-0xDBFF not followed by low surrogate)
|
|
23
|
+
// Replace unpaired low surrogates (0xDC00-0xDFFF not preceded by high surrogate)
|
|
24
|
+
return text.replace(/[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF]/g, "");
|
|
25
|
+
}
|
|
26
|
+
//# sourceMappingURL=sanitize-unicode.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"sanitize-unicode.js","sourceRoot":"","sources":["../../src/utils/sanitize-unicode.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;;;;GAmBG;AACH,MAAM,UAAU,kBAAkB,CAAC,IAAY,EAAU;IACxD,iFAAiF;IACjF,iFAAiF;IACjF,OAAO,IAAI,CAAC,OAAO,CAAC,yEAAyE,EAAE,EAAE,CAAC,CAAC;AAAA,CACnG","sourcesContent":["/**\n * Removes unpaired Unicode surrogate characters from a string.\n *\n * Unpaired surrogates (high surrogates 0xD800-0xDBFF without matching low surrogates 0xDC00-0xDFFF,\n * or vice versa) cause JSON serialization errors in many API providers.\n *\n * Valid emoji and other characters outside the Basic Multilingual Plane use properly paired\n * surrogates and will NOT be affected by this function.\n *\n * @param text - The text to sanitize\n * @returns The sanitized text with unpaired surrogates removed\n *\n * @example\n * // Valid emoji (properly paired surrogates) are preserved\n * sanitizeSurrogates(\"Hello 🙈 World\") // => \"Hello 🙈 World\"\n *\n * // Unpaired high surrogate is removed\n * const unpaired = String.fromCharCode(0xD83D); // high surrogate without low\n * sanitizeSurrogates(`Text ${unpaired} here`) // => \"Text here\"\n */\nexport function sanitizeSurrogates(text: string): string {\n\t// Replace unpaired high surrogates (0xD800-0xDBFF not followed by low surrogate)\n\t// Replace unpaired low surrogates (0xDC00-0xDFFF not preceded by high surrogate)\n\treturn text.replace(/[\\uD800-\\uDBFF](?![\\uDC00-\\uDFFF])|(?<![\\uD800-\\uDBFF])[\\uDC00-\\uDFFF]/g, \"\");\n}\n"]}
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
import { type TUnsafe } from "@sinclair/typebox";
/**
 * Creates a string enum schema compatible with Google's API and other providers
 * that don't support anyOf/const patterns: the result is a plain JSON Schema
 * `{ type: "string", enum: [...] }` while keeping the literal union type.
 *
 * @param values - The allowed string literals (order is preserved in the schema)
 * @param options - Optional schema metadata (description, default value)
 *
 * @example
 * const OperationSchema = StringEnum(["add", "subtract", "multiply", "divide"], {
 *   description: "The operation to perform"
 * });
 *
 * type Operation = Static<typeof OperationSchema>; // "add" | "subtract" | "multiply" | "divide"
 */
export declare function StringEnum<T extends readonly string[]>(values: T, options?: {
    description?: string;
    default?: T[number];
}): TUnsafe<T[number]>;
|
|
17
|
+
//# sourceMappingURL=typebox-helpers.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"typebox-helpers.d.ts","sourceRoot":"","sources":["../../src/utils/typebox-helpers.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,KAAK,OAAO,EAAQ,MAAM,mBAAmB,CAAC;AAEvD;;;;;;;;;;GAUG;AACH,wBAAgB,UAAU,CAAC,CAAC,SAAS,SAAS,MAAM,EAAE,EACrD,MAAM,EAAE,CAAC,EACT,OAAO,CAAC,EAAE;IAAE,WAAW,CAAC,EAAE,MAAM,CAAC;IAAC,OAAO,CAAC,EAAE,CAAC,CAAC,MAAM,CAAC,CAAA;CAAE,GACrD,OAAO,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAOpB","sourcesContent":["import { type TUnsafe, Type } from \"@sinclair/typebox\";\n\n/**\n * Creates a string enum schema compatible with Google's API and other providers\n * that don't support anyOf/const patterns.\n *\n * @example\n * const OperationSchema = StringEnum([\"add\", \"subtract\", \"multiply\", \"divide\"], {\n * description: \"The operation to perform\"\n * });\n *\n * type Operation = Static<typeof OperationSchema>; // \"add\" | \"subtract\" | \"multiply\" | \"divide\"\n */\nexport function StringEnum<T extends readonly string[]>(\n\tvalues: T,\n\toptions?: { description?: string; default?: T[number] },\n): TUnsafe<T[number]> {\n\treturn Type.Unsafe<T[number]>({\n\t\ttype: \"string\",\n\t\tenum: values as any,\n\t\t...(options?.description && { description: options.description }),\n\t\t...(options?.default && { default: options.default }),\n\t});\n}\n"]}
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
import { Type } from "@sinclair/typebox";
|
|
2
|
+
/**
|
|
3
|
+
* Creates a string enum schema compatible with Google's API and other providers
|
|
4
|
+
* that don't support anyOf/const patterns.
|
|
5
|
+
*
|
|
6
|
+
* @example
|
|
7
|
+
* const OperationSchema = StringEnum(["add", "subtract", "multiply", "divide"], {
|
|
8
|
+
* description: "The operation to perform"
|
|
9
|
+
* });
|
|
10
|
+
*
|
|
11
|
+
* type Operation = Static<typeof OperationSchema>; // "add" | "subtract" | "multiply" | "divide"
|
|
12
|
+
*/
|
|
13
|
+
export function StringEnum(values, options) {
|
|
14
|
+
return Type.Unsafe({
|
|
15
|
+
type: "string",
|
|
16
|
+
enum: values,
|
|
17
|
+
...(options?.description && { description: options.description }),
|
|
18
|
+
...(options?.default && { default: options.default }),
|
|
19
|
+
});
|
|
20
|
+
}
|
|
21
|
+
//# sourceMappingURL=typebox-helpers.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"typebox-helpers.js","sourceRoot":"","sources":["../../src/utils/typebox-helpers.ts"],"names":[],"mappings":"AAAA,OAAO,EAAgB,IAAI,EAAE,MAAM,mBAAmB,CAAC;AAEvD;;;;;;;;;;GAUG;AACH,MAAM,UAAU,UAAU,CACzB,MAAS,EACT,OAAuD,EAClC;IACrB,OAAO,IAAI,CAAC,MAAM,CAAY;QAC7B,IAAI,EAAE,QAAQ;QACd,IAAI,EAAE,MAAa;QACnB,GAAG,CAAC,OAAO,EAAE,WAAW,IAAI,EAAE,WAAW,EAAE,OAAO,CAAC,WAAW,EAAE,CAAC;QACjE,GAAG,CAAC,OAAO,EAAE,OAAO,IAAI,EAAE,OAAO,EAAE,OAAO,CAAC,OAAO,EAAE,CAAC;KACrD,CAAC,CAAC;AAAA,CACH","sourcesContent":["import { type TUnsafe, Type } from \"@sinclair/typebox\";\n\n/**\n * Creates a string enum schema compatible with Google's API and other providers\n * that don't support anyOf/const patterns.\n *\n * @example\n * const OperationSchema = StringEnum([\"add\", \"subtract\", \"multiply\", \"divide\"], {\n * description: \"The operation to perform\"\n * });\n *\n * type Operation = Static<typeof OperationSchema>; // \"add\" | \"subtract\" | \"multiply\" | \"divide\"\n */\nexport function StringEnum<T extends readonly string[]>(\n\tvalues: T,\n\toptions?: { description?: string; default?: T[number] },\n): TUnsafe<T[number]> {\n\treturn Type.Unsafe<T[number]>({\n\t\ttype: \"string\",\n\t\tenum: values as any,\n\t\t...(options?.description && { description: options.description }),\n\t\t...(options?.default && { default: options.default }),\n\t});\n}\n"]}
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
import type { Tool, ToolCall } from "../types.js";
/**
 * Finds a tool by name and validates the tool call arguments against its TypeBox schema.
 *
 * NOTE: in environments where runtime code generation is blocked (e.g. Manifest V3
 * browser extensions), validation is skipped and the raw arguments are returned.
 *
 * @param tools Array of tool definitions
 * @param toolCall The tool call from the LLM
 * @returns The validated arguments
 * @throws Error if tool is not found or validation fails
 */
export declare function validateToolCall(tools: Tool[], toolCall: ToolCall): any;
/**
 * Validates tool call arguments against the tool's TypeBox schema.
 *
 * AJV type coercion is enabled, so the returned arguments may differ from the
 * input (e.g. "42" coerced to 42); the caller should use the returned value.
 *
 * @param tool The tool definition with TypeBox schema
 * @param toolCall The tool call from the LLM
 * @returns The validated (and potentially coerced) arguments
 * @throws Error with formatted message if validation fails
 */
export declare function validateToolArguments(tool: Tool, toolCall: ToolCall): any;
|
|
18
|
+
//# sourceMappingURL=validation.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"validation.d.ts","sourceRoot":"","sources":["../../src/utils/validation.ts"],"names":[],"mappings":"AAOA,OAAO,KAAK,EAAE,IAAI,EAAE,QAAQ,EAAE,MAAM,aAAa,CAAC;AAkClD;;;;;;GAMG;AACH,wBAAgB,gBAAgB,CAAC,KAAK,EAAE,IAAI,EAAE,EAAE,QAAQ,EAAE,QAAQ,GAAG,GAAG,CAMvE;AAED;;;;;;GAMG;AACH,wBAAgB,qBAAqB,CAAC,IAAI,EAAE,IAAI,EAAE,QAAQ,EAAE,QAAQ,GAAG,GAAG,CA6BzE","sourcesContent":["import AjvModule from \"ajv\";\nimport addFormatsModule from \"ajv-formats\";\n\n// Handle both default and named exports\nconst Ajv = (AjvModule as any).default || AjvModule;\nconst addFormats = (addFormatsModule as any).default || addFormatsModule;\n\nimport type { Tool, ToolCall } from \"../types.js\";\n\n// Detect if we're in a browser extension environment with strict CSP\n// Chrome extensions with Manifest V3 don't allow eval/Function constructor\nconst isBrowserExtension = typeof globalThis !== \"undefined\" && (globalThis as any).chrome?.runtime?.id !== undefined;\n\nfunction canUseRuntimeCodegen(): boolean {\n\tif (isBrowserExtension) {\n\t\treturn false;\n\t}\n\n\ttry {\n\t\tnew Function(\"return true;\");\n\t\treturn true;\n\t} catch {\n\t\treturn false;\n\t}\n}\n\n// Create a singleton AJV instance with formats only when runtime code generation is available.\nlet ajv: any = null;\nif (canUseRuntimeCodegen()) {\n\ttry {\n\t\tajv = new Ajv({\n\t\t\tallErrors: true,\n\t\t\tstrict: false,\n\t\t\tcoerceTypes: true,\n\t\t});\n\t\taddFormats(ajv);\n\t} catch (_e) {\n\t\tconsole.warn(\"AJV validation disabled due to CSP restrictions\");\n\t}\n}\n\n/**\n * Finds a tool by name and validates the tool call arguments against its TypeBox schema\n * @param tools Array of tool definitions\n * @param toolCall The tool call from the LLM\n * @returns The validated arguments\n * @throws Error if tool is not found or validation fails\n */\nexport function validateToolCall(tools: Tool[], toolCall: ToolCall): any {\n\tconst tool = tools.find((t) => t.name === toolCall.name);\n\tif (!tool) 
{\n\t\tthrow new Error(`Tool \"${toolCall.name}\" not found`);\n\t}\n\treturn validateToolArguments(tool, toolCall);\n}\n\n/**\n * Validates tool call arguments against the tool's TypeBox schema\n * @param tool The tool definition with TypeBox schema\n * @param toolCall The tool call from the LLM\n * @returns The validated (and potentially coerced) arguments\n * @throws Error with formatted message if validation fails\n */\nexport function validateToolArguments(tool: Tool, toolCall: ToolCall): any {\n\t// Skip validation in environments where runtime code generation is unavailable.\n\tif (!ajv || !canUseRuntimeCodegen()) {\n\t\treturn toolCall.arguments;\n\t}\n\n\t// Compile the schema.\n\tconst validate = ajv.compile(tool.parameters);\n\n\t// Clone arguments so AJV can safely mutate for type coercion\n\tconst args = structuredClone(toolCall.arguments);\n\n\t// Validate the arguments (AJV mutates args in-place for type coercion)\n\tif (validate(args)) {\n\t\treturn args;\n\t}\n\n\t// Format validation errors nicely\n\tconst errors =\n\t\tvalidate.errors\n\t\t\t?.map((err: any) => {\n\t\t\t\tconst path = err.instancePath ? err.instancePath.substring(1) : err.params.missingProperty || \"root\";\n\t\t\t\treturn ` - ${path}: ${err.message}`;\n\t\t\t})\n\t\t\t.join(\"\\n\") || \"Unknown validation error\";\n\n\tconst errorMessage = `Validation failed for tool \"${toolCall.name}\":\\n${errors}\\n\\nReceived arguments:\\n${JSON.stringify(toolCall.arguments, null, 2)}`;\n\n\tthrow new Error(errorMessage);\n}\n"]}
|
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
import AjvModule from "ajv";
|
|
2
|
+
import addFormatsModule from "ajv-formats";
|
|
3
|
+
// Handle both default and named exports: depending on the bundler/runtime
// (CJS/ESM interop), "ajv" and "ajv-formats" may expose their constructor
// either directly or under a `.default` property.
const Ajv = AjvModule.default || AjvModule;
const addFormats = addFormatsModule.default || addFormatsModule;
|
|
6
|
+
// Detect if we're in a browser extension environment with strict CSP
|
|
7
|
+
// Chrome extensions with Manifest V3 don't allow eval/Function constructor
|
|
8
|
+
const isBrowserExtension = typeof globalThis !== "undefined" && globalThis.chrome?.runtime?.id !== undefined;
|
|
9
|
+
function canUseRuntimeCodegen() {
|
|
10
|
+
if (isBrowserExtension) {
|
|
11
|
+
return false;
|
|
12
|
+
}
|
|
13
|
+
try {
|
|
14
|
+
new Function("return true;");
|
|
15
|
+
return true;
|
|
16
|
+
}
|
|
17
|
+
catch {
|
|
18
|
+
return false;
|
|
19
|
+
}
|
|
20
|
+
}
|
|
21
|
+
// Create a singleton AJV instance with formats only when runtime code generation is available.
// `ajv` stays null when codegen is blocked; validateToolArguments then skips
// validation entirely instead of failing every tool call.
let ajv = null;
if (canUseRuntimeCodegen()) {
    try {
        ajv = new Ajv({
            allErrors: true, // collect every error, not just the first
            strict: false, // tolerate unknown schema keywords from arbitrary tool schemas
            coerceTypes: true, // coerce e.g. "42" -> 42 for lenient handling of LLM output
        });
        addFormats(ajv);
    }
    catch (_e) {
        // The codegen probe can pass while AJV's own compilation is still
        // blocked by CSP; degrade gracefully rather than crash at import time.
        console.warn("AJV validation disabled due to CSP restrictions");
    }
}
|
|
36
|
+
/**
|
|
37
|
+
* Finds a tool by name and validates the tool call arguments against its TypeBox schema
|
|
38
|
+
* @param tools Array of tool definitions
|
|
39
|
+
* @param toolCall The tool call from the LLM
|
|
40
|
+
* @returns The validated arguments
|
|
41
|
+
* @throws Error if tool is not found or validation fails
|
|
42
|
+
*/
|
|
43
|
+
export function validateToolCall(tools, toolCall) {
|
|
44
|
+
const tool = tools.find((t) => t.name === toolCall.name);
|
|
45
|
+
if (!tool) {
|
|
46
|
+
throw new Error(`Tool "${toolCall.name}" not found`);
|
|
47
|
+
}
|
|
48
|
+
return validateToolArguments(tool, toolCall);
|
|
49
|
+
}
|
|
50
|
+
/**
|
|
51
|
+
* Validates tool call arguments against the tool's TypeBox schema
|
|
52
|
+
* @param tool The tool definition with TypeBox schema
|
|
53
|
+
* @param toolCall The tool call from the LLM
|
|
54
|
+
* @returns The validated (and potentially coerced) arguments
|
|
55
|
+
* @throws Error with formatted message if validation fails
|
|
56
|
+
*/
|
|
57
|
+
export function validateToolArguments(tool, toolCall) {
|
|
58
|
+
// Skip validation in environments where runtime code generation is unavailable.
|
|
59
|
+
if (!ajv || !canUseRuntimeCodegen()) {
|
|
60
|
+
return toolCall.arguments;
|
|
61
|
+
}
|
|
62
|
+
// Compile the schema.
|
|
63
|
+
const validate = ajv.compile(tool.parameters);
|
|
64
|
+
// Clone arguments so AJV can safely mutate for type coercion
|
|
65
|
+
const args = structuredClone(toolCall.arguments);
|
|
66
|
+
// Validate the arguments (AJV mutates args in-place for type coercion)
|
|
67
|
+
if (validate(args)) {
|
|
68
|
+
return args;
|
|
69
|
+
}
|
|
70
|
+
// Format validation errors nicely
|
|
71
|
+
const errors = validate.errors
|
|
72
|
+
?.map((err) => {
|
|
73
|
+
const path = err.instancePath ? err.instancePath.substring(1) : err.params.missingProperty || "root";
|
|
74
|
+
return ` - ${path}: ${err.message}`;
|
|
75
|
+
})
|
|
76
|
+
.join("\n") || "Unknown validation error";
|
|
77
|
+
const errorMessage = `Validation failed for tool "${toolCall.name}":\n${errors}\n\nReceived arguments:\n${JSON.stringify(toolCall.arguments, null, 2)}`;
|
|
78
|
+
throw new Error(errorMessage);
|
|
79
|
+
}
|
|
80
|
+
//# sourceMappingURL=validation.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"validation.js","sourceRoot":"","sources":["../../src/utils/validation.ts"],"names":[],"mappings":"AAAA,OAAO,SAAS,MAAM,KAAK,CAAC;AAC5B,OAAO,gBAAgB,MAAM,aAAa,CAAC;AAE3C,wCAAwC;AACxC,MAAM,GAAG,GAAI,SAAiB,CAAC,OAAO,IAAI,SAAS,CAAC;AACpD,MAAM,UAAU,GAAI,gBAAwB,CAAC,OAAO,IAAI,gBAAgB,CAAC;AAIzE,qEAAqE;AACrE,2EAA2E;AAC3E,MAAM,kBAAkB,GAAG,OAAO,UAAU,KAAK,WAAW,IAAK,UAAkB,CAAC,MAAM,EAAE,OAAO,EAAE,EAAE,KAAK,SAAS,CAAC;AAEtH,SAAS,oBAAoB,GAAY;IACxC,IAAI,kBAAkB,EAAE,CAAC;QACxB,OAAO,KAAK,CAAC;IACd,CAAC;IAED,IAAI,CAAC;QACJ,IAAI,QAAQ,CAAC,cAAc,CAAC,CAAC;QAC7B,OAAO,IAAI,CAAC;IACb,CAAC;IAAC,MAAM,CAAC;QACR,OAAO,KAAK,CAAC;IACd,CAAC;AAAA,CACD;AAED,+FAA+F;AAC/F,IAAI,GAAG,GAAQ,IAAI,CAAC;AACpB,IAAI,oBAAoB,EAAE,EAAE,CAAC;IAC5B,IAAI,CAAC;QACJ,GAAG,GAAG,IAAI,GAAG,CAAC;YACb,SAAS,EAAE,IAAI;YACf,MAAM,EAAE,KAAK;YACb,WAAW,EAAE,IAAI;SACjB,CAAC,CAAC;QACH,UAAU,CAAC,GAAG,CAAC,CAAC;IACjB,CAAC;IAAC,OAAO,EAAE,EAAE,CAAC;QACb,OAAO,CAAC,IAAI,CAAC,iDAAiD,CAAC,CAAC;IACjE,CAAC;AACF,CAAC;AAED;;;;;;GAMG;AACH,MAAM,UAAU,gBAAgB,CAAC,KAAa,EAAE,QAAkB,EAAO;IACxE,MAAM,IAAI,GAAG,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,QAAQ,CAAC,IAAI,CAAC,CAAC;IACzD,IAAI,CAAC,IAAI,EAAE,CAAC;QACX,MAAM,IAAI,KAAK,CAAC,SAAS,QAAQ,CAAC,IAAI,aAAa,CAAC,CAAC;IACtD,CAAC;IACD,OAAO,qBAAqB,CAAC,IAAI,EAAE,QAAQ,CAAC,CAAC;AAAA,CAC7C;AAED;;;;;;GAMG;AACH,MAAM,UAAU,qBAAqB,CAAC,IAAU,EAAE,QAAkB,EAAO;IAC1E,gFAAgF;IAChF,IAAI,CAAC,GAAG,IAAI,CAAC,oBAAoB,EAAE,EAAE,CAAC;QACrC,OAAO,QAAQ,CAAC,SAAS,CAAC;IAC3B,CAAC;IAED,sBAAsB;IACtB,MAAM,QAAQ,GAAG,GAAG,CAAC,OAAO,CAAC,IAAI,CAAC,UAAU,CAAC,CAAC;IAE9C,6DAA6D;IAC7D,MAAM,IAAI,GAAG,eAAe,CAAC,QAAQ,CAAC,SAAS,CAAC,CAAC;IAEjD,uEAAuE;IACvE,IAAI,QAAQ,CAAC,IAAI,CAAC,EAAE,CAAC;QACpB,OAAO,IAAI,CAAC;IACb,CAAC;IAED,kCAAkC;IAClC,MAAM,MAAM,GACX,QAAQ,CAAC,MAAM;QACd,EAAE,GAAG,CAAC,CAAC,GAAQ,EAAE,EAAE,CAAC;QACnB,MAAM,IAAI,GAAG,GAAG,CAAC,YAAY,CAAC,CAAC,CAAC,GAAG,CAAC,YAAY,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,MAAM,CAAC,eAAe,IAAI,MAAM,CAAC;QACrG,OAAO,OAAO,IAAI,KAAK,GAAG,CAAC,OAAO,EAAE,CAAC;I
AAA,CACrC,CAAC;SACD,IAAI,CAAC,IAAI,CAAC,IAAI,0BAA0B,CAAC;IAE5C,MAAM,YAAY,GAAG,+BAA+B,QAAQ,CAAC,IAAI,OAAO,MAAM,4BAA4B,IAAI,CAAC,SAAS,CAAC,QAAQ,CAAC,SAAS,EAAE,IAAI,EAAE,CAAC,CAAC,EAAE,CAAC;IAExJ,MAAM,IAAI,KAAK,CAAC,YAAY,CAAC,CAAC;AAAA,CAC9B","sourcesContent":["import AjvModule from \"ajv\";\nimport addFormatsModule from \"ajv-formats\";\n\n// Handle both default and named exports\nconst Ajv = (AjvModule as any).default || AjvModule;\nconst addFormats = (addFormatsModule as any).default || addFormatsModule;\n\nimport type { Tool, ToolCall } from \"../types.js\";\n\n// Detect if we're in a browser extension environment with strict CSP\n// Chrome extensions with Manifest V3 don't allow eval/Function constructor\nconst isBrowserExtension = typeof globalThis !== \"undefined\" && (globalThis as any).chrome?.runtime?.id !== undefined;\n\nfunction canUseRuntimeCodegen(): boolean {\n\tif (isBrowserExtension) {\n\t\treturn false;\n\t}\n\n\ttry {\n\t\tnew Function(\"return true;\");\n\t\treturn true;\n\t} catch {\n\t\treturn false;\n\t}\n}\n\n// Create a singleton AJV instance with formats only when runtime code generation is available.\nlet ajv: any = null;\nif (canUseRuntimeCodegen()) {\n\ttry {\n\t\tajv = new Ajv({\n\t\t\tallErrors: true,\n\t\t\tstrict: false,\n\t\t\tcoerceTypes: true,\n\t\t});\n\t\taddFormats(ajv);\n\t} catch (_e) {\n\t\tconsole.warn(\"AJV validation disabled due to CSP restrictions\");\n\t}\n}\n\n/**\n * Finds a tool by name and validates the tool call arguments against its TypeBox schema\n * @param tools Array of tool definitions\n * @param toolCall The tool call from the LLM\n * @returns The validated arguments\n * @throws Error if tool is not found or validation fails\n */\nexport function validateToolCall(tools: Tool[], toolCall: ToolCall): any {\n\tconst tool = tools.find((t) => t.name === toolCall.name);\n\tif (!tool) {\n\t\tthrow new Error(`Tool \"${toolCall.name}\" not found`);\n\t}\n\treturn validateToolArguments(tool, toolCall);\n}\n\n/**\n * 
Validates tool call arguments against the tool's TypeBox schema\n * @param tool The tool definition with TypeBox schema\n * @param toolCall The tool call from the LLM\n * @returns The validated (and potentially coerced) arguments\n * @throws Error with formatted message if validation fails\n */\nexport function validateToolArguments(tool: Tool, toolCall: ToolCall): any {\n\t// Skip validation in environments where runtime code generation is unavailable.\n\tif (!ajv || !canUseRuntimeCodegen()) {\n\t\treturn toolCall.arguments;\n\t}\n\n\t// Compile the schema.\n\tconst validate = ajv.compile(tool.parameters);\n\n\t// Clone arguments so AJV can safely mutate for type coercion\n\tconst args = structuredClone(toolCall.arguments);\n\n\t// Validate the arguments (AJV mutates args in-place for type coercion)\n\tif (validate(args)) {\n\t\treturn args;\n\t}\n\n\t// Format validation errors nicely\n\tconst errors =\n\t\tvalidate.errors\n\t\t\t?.map((err: any) => {\n\t\t\t\tconst path = err.instancePath ? err.instancePath.substring(1) : err.params.missingProperty || \"root\";\n\t\t\t\treturn ` - ${path}: ${err.message}`;\n\t\t\t})\n\t\t\t.join(\"\\n\") || \"Unknown validation error\";\n\n\tconst errorMessage = `Validation failed for tool \"${toolCall.name}\":\\n${errors}\\n\\nReceived arguments:\\n${JSON.stringify(toolCall.arguments, null, 2)}`;\n\n\tthrow new Error(errorMessage);\n}\n"]}
|