@lenylvt/pi-ai 0.64.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +203 -0
- package/dist/api-registry.d.ts +20 -0
- package/dist/api-registry.d.ts.map +1 -0
- package/dist/api-registry.js +44 -0
- package/dist/api-registry.js.map +1 -0
- package/dist/cli.d.ts +3 -0
- package/dist/cli.d.ts.map +1 -0
- package/dist/cli.js +119 -0
- package/dist/cli.js.map +1 -0
- package/dist/env-api-keys.d.ts +7 -0
- package/dist/env-api-keys.d.ts.map +1 -0
- package/dist/env-api-keys.js +13 -0
- package/dist/env-api-keys.js.map +1 -0
- package/dist/index.d.ts +20 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +14 -0
- package/dist/index.js.map +1 -0
- package/dist/models.d.ts +24 -0
- package/dist/models.d.ts.map +1 -0
- package/dist/models.generated.d.ts +2332 -0
- package/dist/models.generated.d.ts.map +1 -0
- package/dist/models.generated.js +2186 -0
- package/dist/models.generated.js.map +1 -0
- package/dist/models.js +60 -0
- package/dist/models.js.map +1 -0
- package/dist/oauth.d.ts +2 -0
- package/dist/oauth.d.ts.map +1 -0
- package/dist/oauth.js +2 -0
- package/dist/oauth.js.map +1 -0
- package/dist/providers/anthropic.d.ts +40 -0
- package/dist/providers/anthropic.d.ts.map +1 -0
- package/dist/providers/anthropic.js +749 -0
- package/dist/providers/anthropic.js.map +1 -0
- package/dist/providers/faux.d.ts +56 -0
- package/dist/providers/faux.d.ts.map +1 -0
- package/dist/providers/faux.js +367 -0
- package/dist/providers/faux.js.map +1 -0
- package/dist/providers/github-copilot-headers.d.ts +8 -0
- package/dist/providers/github-copilot-headers.d.ts.map +1 -0
- package/dist/providers/github-copilot-headers.js +29 -0
- package/dist/providers/github-copilot-headers.js.map +1 -0
- package/dist/providers/openai-codex-responses.d.ts +9 -0
- package/dist/providers/openai-codex-responses.d.ts.map +1 -0
- package/dist/providers/openai-codex-responses.js +741 -0
- package/dist/providers/openai-codex-responses.js.map +1 -0
- package/dist/providers/openai-completions.d.ts +15 -0
- package/dist/providers/openai-completions.d.ts.map +1 -0
- package/dist/providers/openai-completions.js +687 -0
- package/dist/providers/openai-completions.js.map +1 -0
- package/dist/providers/openai-responses-shared.d.ts +17 -0
- package/dist/providers/openai-responses-shared.d.ts.map +1 -0
- package/dist/providers/openai-responses-shared.js +458 -0
- package/dist/providers/openai-responses-shared.js.map +1 -0
- package/dist/providers/openai-responses.d.ts +13 -0
- package/dist/providers/openai-responses.d.ts.map +1 -0
- package/dist/providers/openai-responses.js +190 -0
- package/dist/providers/openai-responses.js.map +1 -0
- package/dist/providers/register-builtins.d.ts +16 -0
- package/dist/providers/register-builtins.d.ts.map +1 -0
- package/dist/providers/register-builtins.js +140 -0
- package/dist/providers/register-builtins.js.map +1 -0
- package/dist/providers/simple-options.d.ts +8 -0
- package/dist/providers/simple-options.d.ts.map +1 -0
- package/dist/providers/simple-options.js +35 -0
- package/dist/providers/simple-options.js.map +1 -0
- package/dist/providers/transform-messages.d.ts +8 -0
- package/dist/providers/transform-messages.d.ts.map +1 -0
- package/dist/providers/transform-messages.js +155 -0
- package/dist/providers/transform-messages.js.map +1 -0
- package/dist/stream.d.ts +8 -0
- package/dist/stream.d.ts.map +1 -0
- package/dist/stream.js +27 -0
- package/dist/stream.js.map +1 -0
- package/dist/types.d.ts +283 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +2 -0
- package/dist/types.js.map +1 -0
- package/dist/utils/event-stream.d.ts +21 -0
- package/dist/utils/event-stream.d.ts.map +1 -0
- package/dist/utils/event-stream.js +81 -0
- package/dist/utils/event-stream.js.map +1 -0
- package/dist/utils/hash.d.ts +3 -0
- package/dist/utils/hash.d.ts.map +1 -0
- package/dist/utils/hash.js +14 -0
- package/dist/utils/hash.js.map +1 -0
- package/dist/utils/json-parse.d.ts +9 -0
- package/dist/utils/json-parse.d.ts.map +1 -0
- package/dist/utils/json-parse.js +29 -0
- package/dist/utils/json-parse.js.map +1 -0
- package/dist/utils/oauth/anthropic.d.ts +25 -0
- package/dist/utils/oauth/anthropic.d.ts.map +1 -0
- package/dist/utils/oauth/anthropic.js +335 -0
- package/dist/utils/oauth/anthropic.js.map +1 -0
- package/dist/utils/oauth/github-copilot.d.ts +30 -0
- package/dist/utils/oauth/github-copilot.d.ts.map +1 -0
- package/dist/utils/oauth/github-copilot.js +292 -0
- package/dist/utils/oauth/github-copilot.js.map +1 -0
- package/dist/utils/oauth/index.d.ts +36 -0
- package/dist/utils/oauth/index.d.ts.map +1 -0
- package/dist/utils/oauth/index.js +92 -0
- package/dist/utils/oauth/index.js.map +1 -0
- package/dist/utils/oauth/oauth-page.d.ts +3 -0
- package/dist/utils/oauth/oauth-page.d.ts.map +1 -0
- package/dist/utils/oauth/oauth-page.js +105 -0
- package/dist/utils/oauth/oauth-page.js.map +1 -0
- package/dist/utils/oauth/openai-codex.d.ts +34 -0
- package/dist/utils/oauth/openai-codex.d.ts.map +1 -0
- package/dist/utils/oauth/openai-codex.js +373 -0
- package/dist/utils/oauth/openai-codex.js.map +1 -0
- package/dist/utils/oauth/pkce.d.ts +13 -0
- package/dist/utils/oauth/pkce.d.ts.map +1 -0
- package/dist/utils/oauth/pkce.js +31 -0
- package/dist/utils/oauth/pkce.js.map +1 -0
- package/dist/utils/oauth/types.d.ts +47 -0
- package/dist/utils/oauth/types.d.ts.map +1 -0
- package/dist/utils/oauth/types.js +2 -0
- package/dist/utils/oauth/types.js.map +1 -0
- package/dist/utils/overflow.d.ts +53 -0
- package/dist/utils/overflow.d.ts.map +1 -0
- package/dist/utils/overflow.js +119 -0
- package/dist/utils/overflow.js.map +1 -0
- package/dist/utils/sanitize-unicode.d.ts +22 -0
- package/dist/utils/sanitize-unicode.d.ts.map +1 -0
- package/dist/utils/sanitize-unicode.js +26 -0
- package/dist/utils/sanitize-unicode.js.map +1 -0
- package/dist/utils/typebox-helpers.d.ts +17 -0
- package/dist/utils/typebox-helpers.d.ts.map +1 -0
- package/dist/utils/typebox-helpers.js +21 -0
- package/dist/utils/typebox-helpers.js.map +1 -0
- package/dist/utils/validation.d.ts +18 -0
- package/dist/utils/validation.d.ts.map +1 -0
- package/dist/utils/validation.js +80 -0
- package/dist/utils/validation.js.map +1 -0
- package/package.json +89 -0
- package/src/api-registry.ts +98 -0
- package/src/cli.ts +136 -0
- package/src/env-api-keys.ts +22 -0
- package/src/index.ts +29 -0
- package/src/models.generated.ts +2188 -0
- package/src/models.ts +82 -0
- package/src/oauth.ts +1 -0
- package/src/providers/anthropic.ts +905 -0
- package/src/providers/faux.ts +498 -0
- package/src/providers/github-copilot-headers.ts +37 -0
- package/src/providers/openai-codex-responses.ts +929 -0
- package/src/providers/openai-completions.ts +811 -0
- package/src/providers/openai-responses-shared.ts +513 -0
- package/src/providers/openai-responses.ts +251 -0
- package/src/providers/register-builtins.ts +232 -0
- package/src/providers/simple-options.ts +46 -0
- package/src/providers/transform-messages.ts +172 -0
- package/src/stream.ts +59 -0
- package/src/types.ts +294 -0
- package/src/utils/event-stream.ts +87 -0
- package/src/utils/hash.ts +13 -0
- package/src/utils/json-parse.ts +28 -0
- package/src/utils/oauth/anthropic.ts +402 -0
- package/src/utils/oauth/github-copilot.ts +396 -0
- package/src/utils/oauth/index.ts +123 -0
- package/src/utils/oauth/oauth-page.ts +109 -0
- package/src/utils/oauth/openai-codex.ts +450 -0
- package/src/utils/oauth/pkce.ts +34 -0
- package/src/utils/oauth/types.ts +59 -0
- package/src/utils/overflow.ts +125 -0
- package/src/utils/sanitize-unicode.ts +25 -0
- package/src/utils/typebox-helpers.ts +24 -0
- package/src/utils/validation.ts +93 -0
|
@@ -0,0 +1,450 @@
|
|
|
1
|
+
/**
 * OpenAI Codex (ChatGPT OAuth) flow
 *
 * NOTE: This module uses Node.js crypto and http for the OAuth callback.
 * It is only intended for CLI use, not browser environments.
 */

// NEVER convert to top-level imports - breaks browser/Vite builds (web-ui)
// The node built-ins are loaded lazily so bundlers targeting the browser never
// see a static "node:*" import; createState/startLocalOAuthServer check these
// and throw when the modules are unavailable.
// NOTE(review): the import() promises are not awaited, so _randomBytes/_http can
// still be null for a brief window right after module load — confirm callers
// only start the OAuth flow after startup (e.g. from CLI interaction).
let _randomBytes: typeof import("node:crypto").randomBytes | null = null;
let _http: typeof import("node:http") | null = null;
// Only attempt to load node built-ins when actually running under Node.js or Bun.
if (typeof process !== "undefined" && (process.versions?.node || process.versions?.bun)) {
  import("node:crypto").then((m) => {
    _randomBytes = m.randomBytes;
  });
  import("node:http").then((m) => {
    _http = m;
  });
}
|
|
19
|
+
|
|
20
|
+
import { oauthErrorHtml, oauthSuccessHtml } from "./oauth-page.js";
|
|
21
|
+
import { generatePKCE } from "./pkce.js";
|
|
22
|
+
import type { OAuthCredentials, OAuthLoginCallbacks, OAuthPrompt, OAuthProviderInterface } from "./types.js";
|
|
23
|
+
|
|
24
|
+
// OAuth client configuration for the ChatGPT (Codex) authorization server.
const CLIENT_ID = "app_EMoamEEZ73f0CkXaXp7hrann";
const AUTHORIZE_URL = "https://auth.openai.com/oauth/authorize";
const TOKEN_URL = "https://auth.openai.com/oauth/token";
// Loopback redirect served by startLocalOAuthServer (fixed port 1455).
const REDIRECT_URI = "http://localhost:1455/auth/callback";
const SCOPE = "openid profile email offline_access";
// Namespaced JWT claim that carries the ChatGPT account id.
const JWT_CLAIM_PATH = "https://api.openai.com/auth";

// Outcome of a token-endpoint call: either the full credential triple
// (expires is an absolute epoch-ms deadline) or a bare failure sentinel;
// failure details are logged at the call site.
type TokenSuccess = { type: "success"; access: string; refresh: string; expires: number };
type TokenFailure = { type: "failed" };
type TokenResult = TokenSuccess | TokenFailure;

// Minimal shape of a decoded access-token payload. Only the account-id claim
// is read; all other claims pass through untyped.
type JwtPayload = {
  [JWT_CLAIM_PATH]?: {
    chatgpt_account_id?: string;
  };
  [key: string]: unknown;
};
|
|
41
|
+
|
|
42
|
+
function createState(): string {
|
|
43
|
+
if (!_randomBytes) {
|
|
44
|
+
throw new Error("OpenAI Codex OAuth is only available in Node.js environments");
|
|
45
|
+
}
|
|
46
|
+
return _randomBytes(16).toString("hex");
|
|
47
|
+
}
|
|
48
|
+
|
|
49
|
+
function parseAuthorizationInput(input: string): { code?: string; state?: string } {
|
|
50
|
+
const value = input.trim();
|
|
51
|
+
if (!value) return {};
|
|
52
|
+
|
|
53
|
+
try {
|
|
54
|
+
const url = new URL(value);
|
|
55
|
+
return {
|
|
56
|
+
code: url.searchParams.get("code") ?? undefined,
|
|
57
|
+
state: url.searchParams.get("state") ?? undefined,
|
|
58
|
+
};
|
|
59
|
+
} catch {
|
|
60
|
+
// not a URL
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
if (value.includes("#")) {
|
|
64
|
+
const [code, state] = value.split("#", 2);
|
|
65
|
+
return { code, state };
|
|
66
|
+
}
|
|
67
|
+
|
|
68
|
+
if (value.includes("code=")) {
|
|
69
|
+
const params = new URLSearchParams(value);
|
|
70
|
+
return {
|
|
71
|
+
code: params.get("code") ?? undefined,
|
|
72
|
+
state: params.get("state") ?? undefined,
|
|
73
|
+
};
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
return { code: value };
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
function decodeJwt(token: string): JwtPayload | null {
|
|
80
|
+
try {
|
|
81
|
+
const parts = token.split(".");
|
|
82
|
+
if (parts.length !== 3) return null;
|
|
83
|
+
const payload = parts[1] ?? "";
|
|
84
|
+
const decoded = atob(payload);
|
|
85
|
+
return JSON.parse(decoded) as JwtPayload;
|
|
86
|
+
} catch {
|
|
87
|
+
return null;
|
|
88
|
+
}
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
async function exchangeAuthorizationCode(
|
|
92
|
+
code: string,
|
|
93
|
+
verifier: string,
|
|
94
|
+
redirectUri: string = REDIRECT_URI,
|
|
95
|
+
): Promise<TokenResult> {
|
|
96
|
+
const response = await fetch(TOKEN_URL, {
|
|
97
|
+
method: "POST",
|
|
98
|
+
headers: { "Content-Type": "application/x-www-form-urlencoded" },
|
|
99
|
+
body: new URLSearchParams({
|
|
100
|
+
grant_type: "authorization_code",
|
|
101
|
+
client_id: CLIENT_ID,
|
|
102
|
+
code,
|
|
103
|
+
code_verifier: verifier,
|
|
104
|
+
redirect_uri: redirectUri,
|
|
105
|
+
}),
|
|
106
|
+
});
|
|
107
|
+
|
|
108
|
+
if (!response.ok) {
|
|
109
|
+
const text = await response.text().catch(() => "");
|
|
110
|
+
console.error("[openai-codex] code->token failed:", response.status, text);
|
|
111
|
+
return { type: "failed" };
|
|
112
|
+
}
|
|
113
|
+
|
|
114
|
+
const json = (await response.json()) as {
|
|
115
|
+
access_token?: string;
|
|
116
|
+
refresh_token?: string;
|
|
117
|
+
expires_in?: number;
|
|
118
|
+
};
|
|
119
|
+
|
|
120
|
+
if (!json.access_token || !json.refresh_token || typeof json.expires_in !== "number") {
|
|
121
|
+
console.error("[openai-codex] token response missing fields:", json);
|
|
122
|
+
return { type: "failed" };
|
|
123
|
+
}
|
|
124
|
+
|
|
125
|
+
return {
|
|
126
|
+
type: "success",
|
|
127
|
+
access: json.access_token,
|
|
128
|
+
refresh: json.refresh_token,
|
|
129
|
+
expires: Date.now() + json.expires_in * 1000,
|
|
130
|
+
};
|
|
131
|
+
}
|
|
132
|
+
|
|
133
|
+
async function refreshAccessToken(refreshToken: string): Promise<TokenResult> {
|
|
134
|
+
try {
|
|
135
|
+
const response = await fetch(TOKEN_URL, {
|
|
136
|
+
method: "POST",
|
|
137
|
+
headers: { "Content-Type": "application/x-www-form-urlencoded" },
|
|
138
|
+
body: new URLSearchParams({
|
|
139
|
+
grant_type: "refresh_token",
|
|
140
|
+
refresh_token: refreshToken,
|
|
141
|
+
client_id: CLIENT_ID,
|
|
142
|
+
}),
|
|
143
|
+
});
|
|
144
|
+
|
|
145
|
+
if (!response.ok) {
|
|
146
|
+
const text = await response.text().catch(() => "");
|
|
147
|
+
console.error("[openai-codex] Token refresh failed:", response.status, text);
|
|
148
|
+
return { type: "failed" };
|
|
149
|
+
}
|
|
150
|
+
|
|
151
|
+
const json = (await response.json()) as {
|
|
152
|
+
access_token?: string;
|
|
153
|
+
refresh_token?: string;
|
|
154
|
+
expires_in?: number;
|
|
155
|
+
};
|
|
156
|
+
|
|
157
|
+
if (!json.access_token || !json.refresh_token || typeof json.expires_in !== "number") {
|
|
158
|
+
console.error("[openai-codex] Token refresh response missing fields:", json);
|
|
159
|
+
return { type: "failed" };
|
|
160
|
+
}
|
|
161
|
+
|
|
162
|
+
return {
|
|
163
|
+
type: "success",
|
|
164
|
+
access: json.access_token,
|
|
165
|
+
refresh: json.refresh_token,
|
|
166
|
+
expires: Date.now() + json.expires_in * 1000,
|
|
167
|
+
};
|
|
168
|
+
} catch (error) {
|
|
169
|
+
console.error("[openai-codex] Token refresh error:", error);
|
|
170
|
+
return { type: "failed" };
|
|
171
|
+
}
|
|
172
|
+
}
|
|
173
|
+
|
|
174
|
+
async function createAuthorizationFlow(
|
|
175
|
+
originator: string = "pi",
|
|
176
|
+
): Promise<{ verifier: string; state: string; url: string }> {
|
|
177
|
+
const { verifier, challenge } = await generatePKCE();
|
|
178
|
+
const state = createState();
|
|
179
|
+
|
|
180
|
+
const url = new URL(AUTHORIZE_URL);
|
|
181
|
+
url.searchParams.set("response_type", "code");
|
|
182
|
+
url.searchParams.set("client_id", CLIENT_ID);
|
|
183
|
+
url.searchParams.set("redirect_uri", REDIRECT_URI);
|
|
184
|
+
url.searchParams.set("scope", SCOPE);
|
|
185
|
+
url.searchParams.set("code_challenge", challenge);
|
|
186
|
+
url.searchParams.set("code_challenge_method", "S256");
|
|
187
|
+
url.searchParams.set("state", state);
|
|
188
|
+
url.searchParams.set("id_token_add_organizations", "true");
|
|
189
|
+
url.searchParams.set("codex_cli_simplified_flow", "true");
|
|
190
|
+
url.searchParams.set("originator", originator);
|
|
191
|
+
|
|
192
|
+
return { verifier, state, url: url.toString() };
|
|
193
|
+
}
|
|
194
|
+
|
|
195
|
+
// Handle on the loopback callback server returned by startLocalOAuthServer.
type OAuthServerInfo = {
  // Shut the HTTP server down (callers invoke this in a finally block).
  close: () => void;
  // Resolve waitForCode with null, e.g. when manual code entry wins the race.
  cancelWait: () => void;
  // Resolves with the authorization code from the browser redirect, or null
  // when cancelled or when the server could not bind its port.
  waitForCode: () => Promise<{ code: string } | null>;
};
|
|
200
|
+
|
|
201
|
+
/**
 * Start the loopback HTTP server that receives the OAuth redirect on
 * http://127.0.0.1:1455/auth/callback.
 *
 * The returned promise resolves once the server is listening — or, if the
 * port cannot be bound, with a degraded handle whose waitForCode() resolves
 * to null immediately so callers fall back to manual code paste.
 *
 * @param state - expected `state` query parameter; mismatching callbacks are rejected
 * @throws when the lazily-loaded node:http module is unavailable
 */
function startLocalOAuthServer(state: string): Promise<OAuthServerInfo> {
  if (!_http) {
    throw new Error("OpenAI Codex OAuth is only available in Node.js environments");
  }

  // waitForCodePromise settles exactly once; settleWait is captured from the
  // executor (which runs synchronously) and guarded so later calls are no-ops.
  let settleWait: ((value: { code: string } | null) => void) | undefined;
  const waitForCodePromise = new Promise<{ code: string } | null>((resolve) => {
    let settled = false;
    settleWait = (value) => {
      if (settled) return;
      settled = true;
      resolve(value);
    };
  });

  const server = _http.createServer((req, res) => {
    try {
      // req.url is path + query only; a dummy origin makes it parseable.
      const url = new URL(req.url || "", "http://localhost");
      if (url.pathname !== "/auth/callback") {
        res.statusCode = 404;
        res.setHeader("Content-Type", "text/html; charset=utf-8");
        res.end(oauthErrorHtml("Callback route not found."));
        return;
      }
      // Reject callbacks whose state does not match ours (CSRF protection).
      if (url.searchParams.get("state") !== state) {
        res.statusCode = 400;
        res.setHeader("Content-Type", "text/html; charset=utf-8");
        res.end(oauthErrorHtml("State mismatch."));
        return;
      }
      const code = url.searchParams.get("code");
      if (!code) {
        res.statusCode = 400;
        res.setHeader("Content-Type", "text/html; charset=utf-8");
        res.end(oauthErrorHtml("Missing authorization code."));
        return;
      }
      // Answer the browser first, then hand the code to the waiting flow.
      res.statusCode = 200;
      res.setHeader("Content-Type", "text/html; charset=utf-8");
      res.end(oauthSuccessHtml("OpenAI authentication completed. You can close this window."));
      settleWait?.({ code });
    } catch {
      res.statusCode = 500;
      res.setHeader("Content-Type", "text/html; charset=utf-8");
      res.end(oauthErrorHtml("Internal error while processing OAuth callback."));
    }
  });

  return new Promise((resolve) => {
    server
      .listen(1455, "127.0.0.1", () => {
        // Listening: return the fully functional handle.
        resolve({
          close: () => server.close(),
          cancelWait: () => {
            settleWait?.(null);
          },
          waitForCode: () => waitForCodePromise,
        });
      })
      .on("error", (err: NodeJS.ErrnoException) => {
        // Bind failure (e.g. EADDRINUSE): degrade to manual-paste mode.
        console.error(
          "[openai-codex] Failed to bind http://127.0.0.1:1455 (",
          err.code,
          ") Falling back to manual paste.",
        );
        settleWait?.(null);
        resolve({
          close: () => {
            try {
              server.close();
            } catch {
              // ignore
            }
          },
          cancelWait: () => {},
          waitForCode: async () => null,
        });
      });
  });
}
|
|
281
|
+
|
|
282
|
+
function getAccountId(accessToken: string): string | null {
|
|
283
|
+
const payload = decodeJwt(accessToken);
|
|
284
|
+
const auth = payload?.[JWT_CLAIM_PATH];
|
|
285
|
+
const accountId = auth?.chatgpt_account_id;
|
|
286
|
+
return typeof accountId === "string" && accountId.length > 0 ? accountId : null;
|
|
287
|
+
}
|
|
288
|
+
|
|
289
|
+
/**
 * Login with OpenAI Codex OAuth
 *
 * Runs the full flow: builds the authorize URL, starts the loopback callback
 * server, then obtains an authorization code from (in priority order) the
 * browser redirect, the optional manual-paste race, or a final onPrompt
 * fallback, and exchanges it for credentials.
 *
 * @param options.onAuth - Called with URL and instructions when auth starts
 * @param options.onPrompt - Called to prompt user for manual code paste (fallback if no onManualCodeInput)
 * @param options.onProgress - Optional progress messages
 * @param options.onManualCodeInput - Optional promise that resolves with user-pasted code.
 *   Races with browser callback - whichever completes first wins.
 *   Useful for showing paste input immediately alongside browser flow.
 * @param options.originator - OAuth originator parameter (defaults to "pi")
 * @throws on state mismatch, missing code, failed token exchange, or a
 *   missing account id in the resulting access token
 */
export async function loginOpenAICodex(options: {
  onAuth: (info: { url: string; instructions?: string }) => void;
  onPrompt: (prompt: OAuthPrompt) => Promise<string>;
  onProgress?: (message: string) => void;
  onManualCodeInput?: () => Promise<string>;
  originator?: string;
}): Promise<OAuthCredentials> {
  const { verifier, state, url } = await createAuthorizationFlow(options.originator);
  const server = await startLocalOAuthServer(state);

  options.onAuth({ url, instructions: "A browser window should open. Complete login to finish." });

  let code: string | undefined;
  try {
    if (options.onManualCodeInput) {
      // Race between browser callback and manual input.
      // Manual completion (success or failure) cancels the callback wait via
      // server.cancelWait(), which resolves waitForCode() with null.
      let manualCode: string | undefined;
      let manualError: Error | undefined;
      const manualPromise = options
        .onManualCodeInput()
        .then((input) => {
          manualCode = input;
          server.cancelWait();
        })
        .catch((err) => {
          manualError = err instanceof Error ? err : new Error(String(err));
          server.cancelWait();
        });

      const result = await server.waitForCode();

      // If manual input was cancelled, throw that error
      if (manualError) {
        throw manualError;
      }

      if (result?.code) {
        // Browser callback won
        code = result.code;
      } else if (manualCode) {
        // Manual input won (or callback timed out and user had entered code)
        const parsed = parseAuthorizationInput(manualCode);
        // A pasted redirect URL may carry its own state; reject mismatches.
        if (parsed.state && parsed.state !== state) {
          throw new Error("State mismatch");
        }
        code = parsed.code;
      }

      // If still no code, wait for manual promise to complete and try that
      if (!code) {
        await manualPromise;
        if (manualError) {
          throw manualError;
        }
        if (manualCode) {
          const parsed = parseAuthorizationInput(manualCode);
          if (parsed.state && parsed.state !== state) {
            throw new Error("State mismatch");
          }
          code = parsed.code;
        }
      }
    } else {
      // Original flow: wait for callback, then prompt if needed
      const result = await server.waitForCode();
      if (result?.code) {
        code = result.code;
      }
    }

    // Fallback to onPrompt if still no code
    if (!code) {
      const input = await options.onPrompt({
        message: "Paste the authorization code (or full redirect URL):",
      });
      const parsed = parseAuthorizationInput(input);
      if (parsed.state && parsed.state !== state) {
        throw new Error("State mismatch");
      }
      code = parsed.code;
    }

    if (!code) {
      throw new Error("Missing authorization code");
    }

    const tokenResult = await exchangeAuthorizationCode(code, verifier);
    if (tokenResult.type !== "success") {
      throw new Error("Token exchange failed");
    }

    // The accountId is required downstream; fail loudly if the token lacks it.
    const accountId = getAccountId(tokenResult.access);
    if (!accountId) {
      throw new Error("Failed to extract accountId from token");
    }

    return {
      access: tokenResult.access,
      refresh: tokenResult.refresh,
      expires: tokenResult.expires,
      accountId,
    };
  } finally {
    // Always tear the callback server down, on success and on every throw.
    server.close();
  }
}
|
|
406
|
+
|
|
407
|
+
/**
|
|
408
|
+
* Refresh OpenAI Codex OAuth token
|
|
409
|
+
*/
|
|
410
|
+
export async function refreshOpenAICodexToken(refreshToken: string): Promise<OAuthCredentials> {
|
|
411
|
+
const result = await refreshAccessToken(refreshToken);
|
|
412
|
+
if (result.type !== "success") {
|
|
413
|
+
throw new Error("Failed to refresh OpenAI Codex token");
|
|
414
|
+
}
|
|
415
|
+
|
|
416
|
+
const accountId = getAccountId(result.access);
|
|
417
|
+
if (!accountId) {
|
|
418
|
+
throw new Error("Failed to extract accountId from token");
|
|
419
|
+
}
|
|
420
|
+
|
|
421
|
+
return {
|
|
422
|
+
access: result.access,
|
|
423
|
+
refresh: result.refresh,
|
|
424
|
+
expires: result.expires,
|
|
425
|
+
accountId,
|
|
426
|
+
};
|
|
427
|
+
}
|
|
428
|
+
|
|
429
|
+
// OAuthProviderInterface implementation wiring the Codex flow into the
// provider registry.
export const openaiCodexOAuthProvider: OAuthProviderInterface = {
  id: "openai-codex",
  name: "ChatGPT Plus/Pro (Codex Subscription)",
  // Login runs a loopback callback server, so manual code input is supported.
  usesCallbackServer: true,

  async login(callbacks: OAuthLoginCallbacks): Promise<OAuthCredentials> {
    return loginOpenAICodex({
      onAuth: callbacks.onAuth,
      onPrompt: callbacks.onPrompt,
      onProgress: callbacks.onProgress,
      onManualCodeInput: callbacks.onManualCodeInput,
    });
  },

  async refreshToken(credentials: OAuthCredentials): Promise<OAuthCredentials> {
    return refreshOpenAICodexToken(credentials.refresh);
  },

  // The (short-lived) access token doubles as the API key / bearer token.
  getApiKey(credentials: OAuthCredentials): string {
    return credentials.access;
  },
};
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* PKCE utilities using Web Crypto API.
|
|
3
|
+
* Works in both Node.js 20+ and browsers.
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
/**
|
|
7
|
+
* Encode bytes as base64url string.
|
|
8
|
+
*/
|
|
9
|
+
function base64urlEncode(bytes: Uint8Array): string {
|
|
10
|
+
let binary = "";
|
|
11
|
+
for (const byte of bytes) {
|
|
12
|
+
binary += String.fromCharCode(byte);
|
|
13
|
+
}
|
|
14
|
+
return btoa(binary).replace(/\+/g, "-").replace(/\//g, "_").replace(/=/g, "");
|
|
15
|
+
}
|
|
16
|
+
|
|
17
|
+
/**
|
|
18
|
+
* Generate PKCE code verifier and challenge.
|
|
19
|
+
* Uses Web Crypto API for cross-platform compatibility.
|
|
20
|
+
*/
|
|
21
|
+
export async function generatePKCE(): Promise<{ verifier: string; challenge: string }> {
|
|
22
|
+
// Generate random verifier
|
|
23
|
+
const verifierBytes = new Uint8Array(32);
|
|
24
|
+
crypto.getRandomValues(verifierBytes);
|
|
25
|
+
const verifier = base64urlEncode(verifierBytes);
|
|
26
|
+
|
|
27
|
+
// Compute SHA-256 challenge
|
|
28
|
+
const encoder = new TextEncoder();
|
|
29
|
+
const data = encoder.encode(verifier);
|
|
30
|
+
const hashBuffer = await crypto.subtle.digest("SHA-256", data);
|
|
31
|
+
const challenge = base64urlEncode(new Uint8Array(hashBuffer));
|
|
32
|
+
|
|
33
|
+
return { verifier, challenge };
|
|
34
|
+
}
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
import type { Api, Model } from "../../types.js";
|
|
2
|
+
|
|
3
|
+
// Persisted OAuth credential set. Providers may stash extra fields (e.g. an
// account id) via the index signature.
export type OAuthCredentials = {
  refresh: string;
  access: string;
  // Absolute expiry of the access token, epoch milliseconds.
  expires: number;
  [key: string]: unknown;
};

// Identifier a provider registers under (e.g. "openai-codex").
export type OAuthProviderId = string;

/** @deprecated Use OAuthProviderId instead */
export type OAuthProvider = OAuthProviderId;

// A request for free-text user input during login (e.g. pasting a code).
export type OAuthPrompt = {
  message: string;
  placeholder?: string;
  allowEmpty?: boolean;
};

// Information handed to the UI when the browser-based auth step begins.
export type OAuthAuthInfo = {
  url: string;
  instructions?: string;
};

// Callbacks a host UI supplies to drive an interactive login flow.
export interface OAuthLoginCallbacks {
  // Invoked with the authorize URL once the flow starts.
  onAuth: (info: OAuthAuthInfo) => void;
  // Prompt the user for input; resolves with their response.
  onPrompt: (prompt: OAuthPrompt) => Promise<string>;
  // Optional human-readable progress updates.
  onProgress?: (message: string) => void;
  // Optional manual code entry that races the callback server.
  onManualCodeInput?: () => Promise<string>;
  signal?: AbortSignal;
}

// Contract every OAuth provider implementation must satisfy.
export interface OAuthProviderInterface {
  readonly id: OAuthProviderId;
  readonly name: string;

  /** Run the login flow, return credentials to persist */
  login(callbacks: OAuthLoginCallbacks): Promise<OAuthCredentials>;

  /** Whether login uses a local callback server and supports manual code input. */
  usesCallbackServer?: boolean;

  /** Refresh expired credentials, return updated credentials to persist */
  refreshToken(credentials: OAuthCredentials): Promise<OAuthCredentials>;

  /** Convert credentials to API key string for the provider */
  getApiKey(credentials: OAuthCredentials): string;

  /** Optional: modify models for this provider (e.g., update baseUrl) */
  modifyModels?(models: Model<Api>[], credentials: OAuthCredentials): Model<Api>[];
}

/** @deprecated Use OAuthProviderInterface instead */
export interface OAuthProviderInfo {
  id: OAuthProviderId;
  name: string;
  available: boolean;
}
|
|
@@ -0,0 +1,125 @@
|
|
|
1
|
+
import type { AssistantMessage } from "../types.js";
|
|
2
|
+
|
|
3
|
+
/**
 * Regex patterns to detect context overflow errors from different providers.
 *
 * These patterns match error messages returned when the input exceeds
 * the model's context window.
 *
 * Provider-specific patterns (with example error messages):
 *
 * - Anthropic: "prompt is too long: 213462 tokens > 200000 maximum"
 * - OpenAI: "Your input exceeds the context window of this model"
 * - Google: "The input token count (1196265) exceeds the maximum number of tokens allowed (1048575)"
 * - xAI: "This model's maximum prompt length is 131072 but the request contains 537812 tokens"
 * - Groq: "Please reduce the length of the messages or completion"
 * - OpenRouter: "This endpoint's maximum context length is X tokens. However, you requested about Y tokens"
 * - llama.cpp: "the request exceeds the available context size, try increasing it"
 * - LM Studio: "tokens to keep from the initial prompt is greater than the context length"
 * - GitHub Copilot: "prompt token count of X exceeds the limit of Y"
 * - MiniMax: "invalid params, context window exceeds limit"
 * - Kimi For Coding: "Your request exceeded model token limit: X (requested: Y)"
 * - Cerebras: Returns "400/413 status code (no body)" - handled separately below
 * - Mistral: "Prompt contains X tokens ... too large for model with Y maximum context length"
 * - z.ai: Does NOT error, accepts overflow silently - handled via usage.input > contextWindow
 * - Ollama: Some deployments truncate silently, others return errors like "prompt too long; exceeded max context length by X tokens"
 *
 * NOTE: provider error strings are not a stable API — when adding a pattern,
 * keep it narrow enough not to match unrelated errors, and record an example
 * message above. All patterns use the `i` flag (case-insensitive).
 */
const OVERFLOW_PATTERNS = [
  /prompt is too long/i, // Anthropic
  /input is too long for requested model/i, // Amazon Bedrock
  /exceeds the context window/i, // OpenAI (Completions & Responses API)
  /input token count.*exceeds the maximum/i, // Google (Gemini)
  /maximum prompt length is \d+/i, // xAI (Grok)
  /reduce the length of the messages/i, // Groq
  /maximum context length is \d+ tokens/i, // OpenRouter (all backends)
  /exceeds the limit of \d+/i, // GitHub Copilot
  /exceeds the available context size/i, // llama.cpp server
  /greater than the context length/i, // LM Studio
  /context window exceeds limit/i, // MiniMax
  /exceeded model token limit/i, // Kimi For Coding
  /too large for model with \d+ maximum context length/i, // Mistral
  /model_context_window_exceeded/i, // z.ai non-standard finish_reason surfaced as error text
  /prompt too long; exceeded (?:max )?context length/i, // Ollama explicit overflow error
  /context[_ ]length[_ ]exceeded/i, // Generic fallback
  /too many tokens/i, // Generic fallback
  /token limit exceeded/i, // Generic fallback
];
|
|
47
|
+
|
|
48
|
+
/**
|
|
49
|
+
* Check if an assistant message represents a context overflow error.
|
|
50
|
+
*
|
|
51
|
+
* This handles two cases:
|
|
52
|
+
* 1. Error-based overflow: Most providers return stopReason "error" with a
|
|
53
|
+
* specific error message pattern.
|
|
54
|
+
* 2. Silent overflow: Some providers accept overflow requests and return
|
|
55
|
+
* successfully. For these, we check if usage.input exceeds the context window.
|
|
56
|
+
*
|
|
57
|
+
* ## Reliability by Provider
|
|
58
|
+
*
|
|
59
|
+
* **Reliable detection (returns error with detectable message):**
|
|
60
|
+
* - Anthropic: "prompt is too long: X tokens > Y maximum"
|
|
61
|
+
* - OpenAI (Completions & Responses): "exceeds the context window"
|
|
62
|
+
* - Google Gemini: "input token count exceeds the maximum"
|
|
63
|
+
* - xAI (Grok): "maximum prompt length is X but request contains Y"
|
|
64
|
+
* - Groq: "reduce the length of the messages"
|
|
65
|
+
* - Cerebras: 400/413 status code (no body)
|
|
66
|
+
* - Mistral: "Prompt contains X tokens ... too large for model with Y maximum context length"
|
|
67
|
+
* - OpenRouter (all backends): "maximum context length is X tokens"
|
|
68
|
+
* - llama.cpp: "exceeds the available context size"
|
|
69
|
+
* - LM Studio: "greater than the context length"
|
|
70
|
+
* - Kimi For Coding: "exceeded model token limit: X (requested: Y)"
|
|
71
|
+
*
|
|
72
|
+
* **Unreliable detection:**
|
|
73
|
+
* - z.ai: Sometimes accepts overflow silently (detectable via usage.input > contextWindow),
|
|
74
|
+
* sometimes returns rate limit errors. Pass contextWindow param to detect silent overflow.
|
|
75
|
+
* - Ollama: May truncate input silently for some setups, but may also return explicit
|
|
76
|
+
* overflow errors that match the patterns above. Silent truncation still cannot be
|
|
77
|
+
* detected here because we do not know the expected token count.
|
|
78
|
+
*
|
|
79
|
+
* ## Custom Providers
|
|
80
|
+
*
|
|
81
|
+
* If you've added custom models via settings.json, this function may not detect
|
|
82
|
+
* overflow errors from those providers. To add support:
|
|
83
|
+
*
|
|
84
|
+
* 1. Send a request that exceeds the model's context window
|
|
85
|
+
* 2. Check the errorMessage in the response
|
|
86
|
+
* 3. Create a regex pattern that matches the error
|
|
87
|
+
* 4. The pattern should be added to OVERFLOW_PATTERNS in this file, or
|
|
88
|
+
* check the errorMessage yourself before calling this function
|
|
89
|
+
*
|
|
90
|
+
* @param message - The assistant message to check
|
|
91
|
+
* @param contextWindow - Optional context window size for detecting silent overflow (z.ai)
|
|
92
|
+
* @returns true if the message indicates a context overflow
|
|
93
|
+
*/
|
|
94
|
+
export function isContextOverflow(message: AssistantMessage, contextWindow?: number): boolean {
|
|
95
|
+
// Case 1: Check error message patterns
|
|
96
|
+
if (message.stopReason === "error" && message.errorMessage) {
|
|
97
|
+
// Check known patterns
|
|
98
|
+
if (OVERFLOW_PATTERNS.some((p) => p.test(message.errorMessage!))) {
|
|
99
|
+
return true;
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
// Cerebras returns 400/413 with no body for context overflow
|
|
103
|
+
// Note: 429 is rate limiting (requests/tokens per time), NOT context overflow
|
|
104
|
+
if (/^4(00|13)\s*(status code)?\s*\(no body\)/i.test(message.errorMessage)) {
|
|
105
|
+
return true;
|
|
106
|
+
}
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
// Case 2: Silent overflow (z.ai style) - successful but usage exceeds context
|
|
110
|
+
if (contextWindow && message.stopReason === "stop") {
|
|
111
|
+
const inputTokens = message.usage.input + message.usage.cacheRead;
|
|
112
|
+
if (inputTokens > contextWindow) {
|
|
113
|
+
return true;
|
|
114
|
+
}
|
|
115
|
+
}
|
|
116
|
+
|
|
117
|
+
return false;
|
|
118
|
+
}
|
|
119
|
+
|
|
120
|
+
/**
|
|
121
|
+
* Get the overflow patterns for testing purposes.
|
|
122
|
+
*/
|
|
123
|
+
export function getOverflowPatterns(): RegExp[] {
|
|
124
|
+
return [...OVERFLOW_PATTERNS];
|
|
125
|
+
}
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Removes unpaired Unicode surrogate characters from a string.
|
|
3
|
+
*
|
|
4
|
+
* Unpaired surrogates (high surrogates 0xD800-0xDBFF without matching low surrogates 0xDC00-0xDFFF,
|
|
5
|
+
* or vice versa) cause JSON serialization errors in many API providers.
|
|
6
|
+
*
|
|
7
|
+
* Valid emoji and other characters outside the Basic Multilingual Plane use properly paired
|
|
8
|
+
* surrogates and will NOT be affected by this function.
|
|
9
|
+
*
|
|
10
|
+
* @param text - The text to sanitize
|
|
11
|
+
* @returns The sanitized text with unpaired surrogates removed
|
|
12
|
+
*
|
|
13
|
+
* @example
|
|
14
|
+
* // Valid emoji (properly paired surrogates) are preserved
|
|
15
|
+
* sanitizeSurrogates("Hello 🙈 World") // => "Hello 🙈 World"
|
|
16
|
+
*
|
|
17
|
+
* // Unpaired high surrogate is removed
|
|
18
|
+
* const unpaired = String.fromCharCode(0xD83D); // high surrogate without low
|
|
19
|
+
* sanitizeSurrogates(`Text ${unpaired} here`) // => "Text here"
|
|
20
|
+
*/
|
|
21
|
+
export function sanitizeSurrogates(text: string): string {
|
|
22
|
+
// Replace unpaired high surrogates (0xD800-0xDBFF not followed by low surrogate)
|
|
23
|
+
// Replace unpaired low surrogates (0xDC00-0xDFFF not preceded by high surrogate)
|
|
24
|
+
return text.replace(/[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF]/g, "");
|
|
25
|
+
}
|