@kernl-sdk/ai 0.4.3 → 0.4.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/.turbo/turbo-build.log +1 -1
  2. package/CHANGELOG.md +18 -0
  3. package/dist/__tests__/integration.test.js +10 -11
  4. package/dist/__tests__/language-model.test.js +15 -20
  5. package/dist/convert/__tests__/response.test.js +18 -16
  6. package/dist/convert/__tests__/stream.test.js +21 -20
  7. package/dist/convert/__tests__/tools.test.js +5 -5
  8. package/dist/convert/__tests__/ui-stream.test.js +11 -5
  9. package/dist/convert/response.d.ts +4 -4
  10. package/dist/convert/response.d.ts.map +1 -1
  11. package/dist/convert/response.js +1 -20
  12. package/dist/convert/stream.d.ts.map +1 -1
  13. package/dist/convert/stream.js +1 -7
  14. package/dist/convert/tools.d.ts +2 -2
  15. package/dist/convert/tools.d.ts.map +1 -1
  16. package/dist/convert/tools.js +2 -2
  17. package/dist/embedding-model.d.ts +3 -3
  18. package/dist/embedding-model.d.ts.map +1 -1
  19. package/dist/oauth/anthropic.d.ts +8 -0
  20. package/dist/oauth/anthropic.d.ts.map +1 -0
  21. package/dist/oauth/anthropic.js +65 -0
  22. package/dist/oauth/openai.d.ts +8 -0
  23. package/dist/oauth/openai.d.ts.map +1 -0
  24. package/dist/oauth/openai.js +97 -0
  25. package/dist/oauth/types.d.ts +25 -0
  26. package/dist/oauth/types.d.ts.map +1 -0
  27. package/dist/oauth/types.js +1 -0
  28. package/dist/providers/anthropic.d.ts +42 -2
  29. package/dist/providers/anthropic.d.ts.map +1 -1
  30. package/dist/providers/anthropic.js +34 -1
  31. package/dist/providers/google.d.ts +4 -4
  32. package/dist/providers/google.d.ts.map +1 -1
  33. package/dist/providers/google.js +1 -1
  34. package/dist/providers/openai.d.ts +43 -3
  35. package/dist/providers/openai.d.ts.map +1 -1
  36. package/dist/providers/openai.js +36 -2
  37. package/package.json +9 -9
  38. package/src/__tests__/integration.test.ts +10 -11
  39. package/src/__tests__/language-model.test.ts +15 -20
  40. package/src/convert/__tests__/response.test.ts +23 -21
  41. package/src/convert/__tests__/stream.test.ts +21 -20
  42. package/src/convert/__tests__/tools.test.ts +6 -6
  43. package/src/convert/__tests__/ui-stream.test.ts +11 -5
  44. package/src/convert/response.ts +9 -30
  45. package/src/convert/stream.ts +1 -7
  46. package/src/convert/tools.ts +5 -5
  47. package/src/embedding-model.ts +3 -5
  48. package/src/oauth/anthropic.ts +87 -0
  49. package/src/oauth/openai.ts +129 -0
  50. package/src/oauth/types.ts +25 -0
  51. package/src/providers/anthropic.ts +60 -19
  52. package/src/providers/google.ts +4 -37
  53. package/src/providers/openai.ts +62 -41
@@ -1,51 +1,53 @@
1
1
  import { describe, it, expect } from "vitest";
2
- import type { LanguageModelV3CallWarning } from "@ai-sdk/provider";
2
+ import type { SharedV3Warning } from "@ai-sdk/provider";
3
3
 
4
4
  import { WARNING } from "../response";
5
5
 
6
6
  describe("WARNING codec", () => {
7
7
  describe("decode", () => {
8
- it("should decode unsupported-setting warning", () => {
9
- const aiWarning: LanguageModelV3CallWarning = {
10
- type: "unsupported-setting",
11
- setting: "someUnsupportedSetting",
12
- details: "This setting is not supported by the provider",
8
+ it("should decode unsupported warning", () => {
9
+ const aiWarning: SharedV3Warning = {
10
+ type: "unsupported",
11
+ feature: "someUnsupportedFeature",
12
+ details: "This feature is not supported by the provider",
13
13
  };
14
14
 
15
15
  const result = WARNING.decode(aiWarning);
16
16
 
17
17
  expect(result).toEqual({
18
- type: "unsupported-setting",
19
- setting: "someUnsupportedSetting",
20
- details: "This setting is not supported by the provider",
18
+ type: "unsupported",
19
+ feature: "someUnsupportedFeature",
20
+ details: "This feature is not supported by the provider",
21
21
  });
22
22
  });
23
23
 
24
- it("should decode other warning", () => {
25
- const aiWarning: LanguageModelV3CallWarning = {
26
- type: "other",
27
- message: "Some custom warning message",
24
+ it("should decode compatibility warning", () => {
25
+ const aiWarning: SharedV3Warning = {
26
+ type: "compatibility",
27
+ feature: "someFeature",
28
+ details: "Running in compatibility mode",
28
29
  };
29
30
 
30
31
  const result = WARNING.decode(aiWarning);
31
32
 
32
33
  expect(result).toEqual({
33
- type: "other",
34
- message: "Some custom warning message",
34
+ type: "compatibility",
35
+ feature: "someFeature",
36
+ details: "Running in compatibility mode",
35
37
  });
36
38
  });
37
39
 
38
- it("should handle unknown warning type", () => {
39
- const aiWarning = {
40
- type: "unknown-type",
41
- someField: "value",
42
- } as any;
40
+ it("should decode other warning", () => {
41
+ const aiWarning: SharedV3Warning = {
42
+ type: "other",
43
+ message: "Some custom warning message",
44
+ };
43
45
 
44
46
  const result = WARNING.decode(aiWarning);
45
47
 
46
48
  expect(result).toEqual({
47
49
  type: "other",
48
- message: "Unknown warning type",
50
+ message: "Some custom warning message",
49
51
  });
50
52
  });
51
53
  });
@@ -259,8 +259,8 @@ describe("STREAM_PART codec", () => {
259
259
  type: "stream-start",
260
260
  warnings: [
261
261
  {
262
- type: "unsupported-setting",
263
- setting: "topK",
262
+ type: "unsupported",
263
+ feature: "topK",
264
264
  },
265
265
  ],
266
266
  };
@@ -271,9 +271,8 @@ describe("STREAM_PART codec", () => {
271
271
  kind: "stream.start",
272
272
  warnings: [
273
273
  {
274
- type: "unsupported-setting",
275
- setting: "topK",
276
- details: undefined,
274
+ type: "unsupported",
275
+ feature: "topK",
277
276
  },
278
277
  ],
279
278
  });
@@ -282,11 +281,10 @@ describe("STREAM_PART codec", () => {
282
281
  it("should decode finish event", () => {
283
282
  const part: LanguageModelV3StreamPart = {
284
283
  type: "finish",
285
- finishReason: "stop",
284
+ finishReason: { unified: "stop", raw: "stop" },
286
285
  usage: {
287
- inputTokens: 10,
288
- outputTokens: 20,
289
- totalTokens: 30,
286
+ inputTokens: { total: 10, noCache: 8, cacheRead: 2, cacheWrite: undefined },
287
+ outputTokens: { total: 20, text: 18, reasoning: 2 },
290
288
  },
291
289
  providerMetadata: undefined,
292
290
  };
@@ -295,13 +293,10 @@ describe("STREAM_PART codec", () => {
295
293
 
296
294
  expect(result).toEqual({
297
295
  kind: "finish",
298
- finishReason: "stop",
296
+ finishReason: { unified: "stop", raw: "stop" },
299
297
  usage: {
300
- inputTokens: 10,
301
- outputTokens: 20,
302
- totalTokens: 30,
303
- reasoningTokens: undefined,
304
- cachedInputTokens: undefined,
298
+ inputTokens: { total: 10, noCache: 8, cacheRead: 2, cacheWrite: undefined },
299
+ outputTokens: { total: 20, text: 18, reasoning: 2 },
305
300
  },
306
301
  providerMetadata: undefined,
307
302
  });
@@ -405,8 +400,11 @@ describe("convertStream", () => {
405
400
  { type: "text-end", id: "text-1", providerMetadata: undefined },
406
401
  {
407
402
  type: "finish",
408
- finishReason: "stop",
409
- usage: { inputTokens: 5, outputTokens: 10, totalTokens: 15 },
403
+ finishReason: { unified: "stop", raw: "stop" },
404
+ usage: {
405
+ inputTokens: { total: 5, noCache: 5, cacheRead: undefined, cacheWrite: undefined },
406
+ outputTokens: { total: 10, text: 10, reasoning: undefined },
407
+ },
410
408
  providerMetadata: undefined,
411
409
  },
412
410
  ];
@@ -429,7 +427,7 @@ describe("convertStream", () => {
429
427
  expect(events[0]).toMatchObject({ kind: "text.start" });
430
428
  expect(events[1]).toMatchObject({ kind: "text.delta", text: "Hello" });
431
429
  expect(events[2]).toMatchObject({ kind: "text.end" });
432
- expect(events[3]).toMatchObject({ kind: "finish", finishReason: "stop" });
430
+ expect(events[3]).toMatchObject({ kind: "finish", finishReason: { unified: "stop", raw: "stop" } });
433
431
  });
434
432
 
435
433
  it("should filter out null events", async () => {
@@ -445,8 +443,11 @@ describe("convertStream", () => {
445
443
  } as any, // This should be filtered out (returns null from default case)
446
444
  {
447
445
  type: "finish",
448
- finishReason: "stop",
449
- usage: { inputTokens: 5, outputTokens: 10, totalTokens: 15 },
446
+ finishReason: { unified: "stop", raw: "stop" },
447
+ usage: {
448
+ inputTokens: { total: 5, noCache: 5, cacheRead: undefined, cacheWrite: undefined },
449
+ outputTokens: { total: 10, text: 10, reasoning: undefined },
450
+ },
450
451
  providerMetadata: undefined,
451
452
  },
452
453
  ];
@@ -1,7 +1,7 @@
1
1
  import { describe, it, expect } from "vitest";
2
2
  import type {
3
3
  LanguageModelV3FunctionTool,
4
- LanguageModelV3ProviderDefinedTool,
4
+ LanguageModelV3ProviderTool,
5
5
  LanguageModelV3ToolChoice,
6
6
  } from "@ai-sdk/provider";
7
7
 
@@ -96,8 +96,8 @@ describe("TOOL codec", () => {
96
96
  });
97
97
  });
98
98
 
99
- describe("encode - provider-defined tools", () => {
100
- it("should encode provider-defined tool", () => {
99
+ describe("encode - provider tools", () => {
100
+ it("should encode provider tool", () => {
101
101
  const result = TOOL.encode({
102
102
  kind: "provider-defined",
103
103
  id: "mcp.tool-123",
@@ -106,14 +106,14 @@ describe("TOOL codec", () => {
106
106
  });
107
107
 
108
108
  expect(result).toEqual({
109
- type: "provider-defined",
109
+ type: "provider",
110
110
  id: "mcp.tool-123",
111
111
  name: "custom_mcp_tool",
112
112
  args: { param1: "value1" },
113
113
  });
114
114
  });
115
115
 
116
- it("should encode provider-defined tool without args", () => {
116
+ it("should encode provider tool without args", () => {
117
117
  const result = TOOL.encode({
118
118
  kind: "provider-defined",
119
119
  id: "mcp.tool-id",
@@ -122,7 +122,7 @@ describe("TOOL codec", () => {
122
122
  });
123
123
 
124
124
  expect(result).toEqual({
125
- type: "provider-defined",
125
+ type: "provider",
126
126
  id: "mcp.tool-id",
127
127
  name: "tool_name",
128
128
  args: {},
@@ -274,11 +274,10 @@ describe("STREAM_UI_PART codec", () => {
274
274
  it("should encode finish event", () => {
275
275
  const event: LanguageModelStreamEvent = {
276
276
  kind: "finish",
277
- finishReason: "stop",
277
+ finishReason: { unified: "stop", raw: "stop" },
278
278
  usage: {
279
- inputTokens: 100,
280
- outputTokens: 50,
281
- totalTokens: 150,
279
+ inputTokens: { total: 100, noCache: 100, cacheRead: undefined, cacheWrite: undefined },
280
+ outputTokens: { total: 50, text: 50, reasoning: undefined },
282
281
  },
283
282
  };
284
283
 
@@ -421,7 +420,14 @@ describe("toUIMessageStream", () => {
421
420
  { kind: "text.delta", id: "text-1", text: "Hello" },
422
421
  { kind: "text.delta", id: "text-1", text: " world" },
423
422
  { kind: "text.end", id: "text-1" },
424
- { kind: "finish", finishReason: "stop", usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 } },
423
+ {
424
+ kind: "finish",
425
+ finishReason: { unified: "stop", raw: "stop" },
426
+ usage: {
427
+ inputTokens: { total: 10, noCache: 10, cacheRead: undefined, cacheWrite: undefined },
428
+ outputTokens: { total: 5, text: 5, reasoning: undefined },
429
+ },
430
+ },
425
431
  ];
426
432
 
427
433
  async function* generateEvents() {
@@ -8,7 +8,7 @@ import {
8
8
  type LanguageModelResponseType,
9
9
  type LanguageModelFinishReason,
10
10
  type LanguageModelUsage,
11
- type LanguageModelWarning,
11
+ type SharedWarning,
12
12
  type SharedProviderMetadata,
13
13
  } from "@kernl-sdk/protocol";
14
14
  import { randomID } from "@kernl-sdk/shared/lib";
@@ -16,7 +16,7 @@ import type {
16
16
  LanguageModelV3Content,
17
17
  LanguageModelV3FinishReason,
18
18
  LanguageModelV3Usage,
19
- LanguageModelV3CallWarning,
19
+ SharedV3Warning,
20
20
  JSONSchema7,
21
21
  } from "@ai-sdk/provider";
22
22
 
@@ -28,7 +28,7 @@ export interface AISdkGenerateResult {
28
28
  finishReason: LanguageModelV3FinishReason;
29
29
  usage: LanguageModelV3Usage;
30
30
  providerMetadata?: Record<string, unknown>;
31
- warnings: Array<LanguageModelV3CallWarning>;
31
+ warnings: Array<SharedV3Warning>;
32
32
  }
33
33
 
34
34
  export const MODEL_RESPONSE: Codec<LanguageModelResponse, AISdkGenerateResult> =
@@ -147,33 +147,12 @@ const USAGE: Codec<LanguageModelUsage, LanguageModelV3Usage> = {
147
147
  decode: (usage) => usage as LanguageModelUsage,
148
148
  };
149
149
 
150
- export const WARNING: Codec<LanguageModelWarning, LanguageModelV3CallWarning> =
151
- {
152
- encode: () => {
153
- throw new Error("codec:unimplemented");
154
- },
155
-
156
- decode: (warning: LanguageModelV3CallWarning) => {
157
- switch (warning.type) {
158
- case "unsupported-setting":
159
- return {
160
- type: "unsupported-setting",
161
- setting: warning.setting as any,
162
- details: warning.details,
163
- };
164
- case "other":
165
- return {
166
- type: "other",
167
- message: warning.message,
168
- };
169
- default:
170
- return {
171
- type: "other",
172
- message: "Unknown warning type",
173
- };
174
- }
175
- },
176
- };
150
+ export const WARNING: Codec<SharedWarning, SharedV3Warning> = {
151
+ encode: () => {
152
+ throw new Error("codec:unimplemented");
153
+ },
154
+ decode: (warning) => warning as SharedWarning,
155
+ };
177
156
 
178
157
  /**
179
158
  * AI SDK response format type.
@@ -147,13 +147,7 @@ export const STREAM_PART: Codec<
147
147
  return {
148
148
  kind: "finish",
149
149
  finishReason: part.finishReason as any, // Types should match
150
- usage: {
151
- inputTokens: part.usage.inputTokens,
152
- outputTokens: part.usage.outputTokens,
153
- totalTokens: part.usage.totalTokens,
154
- reasoningTokens: part.usage.reasoningTokens,
155
- cachedInputTokens: part.usage.cachedInputTokens,
156
- },
150
+ usage: part.usage,
157
151
  providerMetadata: part.providerMetadata,
158
152
  };
159
153
 
@@ -5,13 +5,13 @@ import type {
5
5
  } from "@kernl-sdk/protocol";
6
6
  import type {
7
7
  LanguageModelV3FunctionTool,
8
- LanguageModelV3ProviderDefinedTool,
8
+ LanguageModelV3ProviderTool,
9
9
  LanguageModelV3ToolChoice,
10
10
  } from "@ai-sdk/provider";
11
11
 
12
12
  export const TOOL: Codec<
13
13
  LanguageModelTool,
14
- LanguageModelV3FunctionTool | LanguageModelV3ProviderDefinedTool
14
+ LanguageModelV3FunctionTool | LanguageModelV3ProviderTool
15
15
  > = {
16
16
  encode: (tool) => {
17
17
  if (tool.kind === "function") {
@@ -23,13 +23,13 @@ export const TOOL: Codec<
23
23
  providerOptions: tool.providerOptions,
24
24
  } satisfies LanguageModelV3FunctionTool;
25
25
  } else {
26
- // provider-defined
26
+ // provider tool
27
27
  return {
28
- type: "provider-defined",
28
+ type: "provider",
29
29
  id: tool.id,
30
30
  name: tool.name,
31
31
  args: tool.args,
32
- } satisfies LanguageModelV3ProviderDefinedTool;
32
+ } satisfies LanguageModelV3ProviderTool;
33
33
  }
34
34
  },
35
35
  decode: () => {
@@ -10,16 +10,14 @@ import { EMBEDDING_SETTINGS } from "./convert/embedding";
10
10
  /**
11
11
  * EmbeddingModel adapter for the AI SDK EmbeddingModelV3.
12
12
  */
13
- export class AISDKEmbeddingModel<TValue = string>
14
- implements EmbeddingModel<TValue>
15
- {
13
+ export class AISDKEmbeddingModel implements EmbeddingModel<string> {
16
14
  readonly spec = "1.0" as const;
17
15
  readonly provider: string;
18
16
  readonly modelId: string;
19
17
  readonly maxEmbeddingsPerCall?: number;
20
18
  readonly supportsParallelCalls?: boolean;
21
19
 
22
- constructor(private model: EmbeddingModelV3<TValue>) {
20
+ constructor(private model: EmbeddingModelV3) {
23
21
  this.provider = model.provider;
24
22
  this.modelId = model.modelId;
25
23
 
@@ -33,7 +31,7 @@ export class AISDKEmbeddingModel<TValue = string>
33
31
  }
34
32
 
35
33
  async embed(
36
- request: EmbeddingModelRequest<TValue>,
34
+ request: EmbeddingModelRequest<string>,
37
35
  ): Promise<EmbeddingModelResponse> {
38
36
  const settings = request.settings
39
37
  ? EMBEDDING_SETTINGS.encode(request.settings)
@@ -0,0 +1,87 @@
1
+ import { appendFileSync } from "node:fs";
2
+ import type { OAuthCredentials } from "./types";
3
+
4
+ const TOKEN_URL = "https://console.anthropic.com/v1/oauth/token";
5
+ const CLIENT_ID = "9d1c250a-e61b-44d9-88ed-5944d1962f5e";
6
+
7
+ function debug(msg: string) {
8
+ try {
9
+ appendFileSync("/tmp/popcorn-debug.log", `${new Date().toISOString()} [oauth/anthropic] ${msg}\n`);
10
+ } catch {
11
+ // ignore
12
+ }
13
+ }
14
+
15
+ interface TokenResponse {
16
+ access_token: string;
17
+ refresh_token: string;
18
+ expires_in: number;
19
+ }
20
+
21
+ /**
22
+ * Refresh Anthropic OAuth tokens.
23
+ */
24
+ async function refresh(creds: OAuthCredentials): Promise<void> {
25
+ debug(`Refreshing token...`);
26
+ const res = await fetch(TOKEN_URL, {
27
+ method: "POST",
28
+ headers: { "Content-Type": "application/json" },
29
+ body: JSON.stringify({
30
+ grant_type: "refresh_token",
31
+ refresh_token: creds.refreshToken,
32
+ client_id: CLIENT_ID,
33
+ }),
34
+ });
35
+
36
+ if (!res.ok) {
37
+ throw new Error(`Token refresh failed: ${res.status}`);
38
+ }
39
+
40
+ const data = (await res.json()) as TokenResponse;
41
+
42
+ creds.accessToken = data.access_token;
43
+ creds.refreshToken = data.refresh_token;
44
+ creds.expiresAt = Date.now() + data.expires_in * 1000;
45
+
46
+ creds.onRefresh?.({
47
+ accessToken: creds.accessToken,
48
+ refreshToken: creds.refreshToken,
49
+ expiresAt: creds.expiresAt,
50
+ });
51
+ debug(`Token refreshed successfully`);
52
+ }
53
+
54
+ /**
55
+ * Create a fetch wrapper for Anthropic OAuth.
56
+ *
57
+ * Uses the standard Anthropic API with OAuth bearer token.
58
+ */
59
+ export function createOAuthFetch(creds: OAuthCredentials) {
60
+ return async (
61
+ input: string | URL | Request,
62
+ init?: RequestInit,
63
+ ): Promise<Response> => {
64
+ // Refresh if expired (with 30s buffer)
65
+ if (Date.now() >= creds.expiresAt - 30_000) {
66
+ await refresh(creds);
67
+ }
68
+
69
+ const headers = new Headers(init?.headers);
70
+ headers.set("Authorization", `Bearer ${creds.accessToken}`);
71
+ // Required beta header for OAuth
72
+ headers.set("anthropic-beta", "oauth-2025-04-20");
73
+
74
+ const url = typeof input === "string" ? input : input instanceof URL ? input.toString() : input.url;
75
+ debug(`Request to: ${url}`);
76
+
77
+ const response = await fetch(input, { ...init, headers });
78
+
79
+ debug(`Response status: ${response.status}`);
80
+ if (!response.ok) {
81
+ const text = await response.clone().text();
82
+ debug(`Error response: ${text.slice(0, 1000)}`);
83
+ }
84
+
85
+ return response;
86
+ };
87
+ }
@@ -0,0 +1,129 @@
1
+ import { appendFileSync } from "node:fs";
2
+ import type { OpenAIOAuthCredentials } from "./types";
3
+
4
+ const TOKEN_URL = "https://auth.openai.com/oauth/token";
5
+ const CLIENT_ID = "app_EMoamEEZ73f0CkXaXp7hrann";
6
+ const CODEX_ENDPOINT = "https://chatgpt.com/backend-api/codex/responses";
7
+
8
+ function debug(msg: string) {
9
+ try {
10
+ appendFileSync(
11
+ "/tmp/popcorn-debug.log",
12
+ `${new Date().toISOString()} [oauth/fetch] ${msg}\n`,
13
+ );
14
+ } catch {
15
+ // ignore
16
+ }
17
+ }
18
+
19
+ interface TokenResponse {
20
+ access_token: string;
21
+ refresh_token: string;
22
+ expires_in: number;
23
+ }
24
+
25
+ /**
26
+ * Refresh OpenAI OAuth tokens.
27
+ */
28
+ async function refresh(creds: OpenAIOAuthCredentials): Promise<void> {
29
+ const res = await fetch(TOKEN_URL, {
30
+ method: "POST",
31
+ headers: { "Content-Type": "application/x-www-form-urlencoded" },
32
+ body: new URLSearchParams({
33
+ grant_type: "refresh_token",
34
+ refresh_token: creds.refreshToken,
35
+ client_id: CLIENT_ID,
36
+ }),
37
+ });
38
+
39
+ if (!res.ok) {
40
+ throw new Error(`Token refresh failed: ${res.status}`);
41
+ }
42
+
43
+ const data = (await res.json()) as TokenResponse;
44
+
45
+ creds.accessToken = data.access_token;
46
+ creds.refreshToken = data.refresh_token;
47
+ creds.expiresAt = Date.now() + data.expires_in * 1000;
48
+
49
+ creds.onRefresh?.({
50
+ accessToken: creds.accessToken,
51
+ refreshToken: creds.refreshToken,
52
+ expiresAt: creds.expiresAt,
53
+ });
54
+ }
55
+
56
+ /**
57
+ * Create a fetch wrapper for OpenAI Codex OAuth.
58
+ *
59
+ * Redirects all requests to the Codex endpoint and adds OAuth headers.
60
+ */
61
+ export function createOAuthFetch(creds: OpenAIOAuthCredentials) {
62
+ return async (
63
+ input: string | URL | Request,
64
+ init?: RequestInit,
65
+ ): Promise<Response> => {
66
+ // Refresh if expired (with 30s buffer)
67
+ if (Date.now() >= creds.expiresAt - 30_000) {
68
+ await refresh(creds);
69
+ }
70
+
71
+ const headers = new Headers(init?.headers);
72
+ headers.set("Authorization", `Bearer ${creds.accessToken}`);
73
+
74
+ if (creds.accountId) {
75
+ headers.set("ChatGPT-Account-Id", creds.accountId);
76
+ }
77
+
78
+ // Debug: log request
79
+ const url =
80
+ typeof input === "string"
81
+ ? input
82
+ : input instanceof URL
83
+ ? input.toString()
84
+ : input.url;
85
+ debug(`Request to: ${url}`);
86
+ debug(`Redirecting to: ${CODEX_ENDPOINT}`);
87
+
88
+ // Transform request body for Codex API
89
+ // Codex requires "instructions" field instead of developer/system role in input
90
+ // Codex also requires "store: false"
91
+ let body = init?.body;
92
+ if (body && typeof body === "string") {
93
+ try {
94
+ const parsed = JSON.parse(body);
95
+
96
+ // Codex requires store: false
97
+ parsed.store = false;
98
+
99
+ // Extract developer/system message as instructions
100
+ if (parsed.input && Array.isArray(parsed.input)) {
101
+ const devIdx = parsed.input.findIndex(
102
+ (m: any) => m.role === "developer" || m.role === "system",
103
+ );
104
+ if (devIdx !== -1) {
105
+ const devMsg = parsed.input[devIdx];
106
+ parsed.instructions = devMsg.content;
107
+ parsed.input.splice(devIdx, 1);
108
+ }
109
+ }
110
+
111
+ body = JSON.stringify(parsed);
112
+ debug(`Transformed body: ${body.slice(0, 500)}`);
113
+ } catch {
114
+ debug(`Failed to transform body`);
115
+ }
116
+ }
117
+
118
+ const response = await fetch(CODEX_ENDPOINT, { ...init, headers, body });
119
+
120
+ // Debug: log response
121
+ debug(`Response status: ${response.status}`);
122
+ if (!response.ok) {
123
+ const text = await response.clone().text();
124
+ debug(`Error response: ${text.slice(0, 1000)}`);
125
+ }
126
+
127
+ return response;
128
+ };
129
+ }
@@ -0,0 +1,25 @@
1
+ /**
2
+ * Base OAuth credentials.
3
+ */
4
+ export interface OAuthCredentials {
5
+ /** Current access token */
6
+ accessToken: string;
7
+ /** Refresh token for obtaining new access tokens */
8
+ refreshToken: string;
9
+ /** Expiration timestamp in milliseconds */
10
+ expiresAt: number;
11
+ /** Called when tokens are refreshed - use to persist new tokens */
12
+ onRefresh?: (tokens: {
13
+ accessToken: string;
14
+ refreshToken: string;
15
+ expiresAt: number;
16
+ }) => void;
17
+ }
18
+
19
+ /**
20
+ * OpenAI OAuth credentials (ChatGPT Plus/Pro via Codex).
21
+ */
22
+ export interface OpenAIOAuthCredentials extends OAuthCredentials {
23
+ /** Account ID for org/team subscriptions */
24
+ accountId?: string;
25
+ }