@kernl-sdk/ai 0.4.3 → 0.4.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/.turbo/turbo-build.log +1 -1
  2. package/CHANGELOG.md +24 -0
  3. package/dist/__tests__/integration.test.js +10 -11
  4. package/dist/__tests__/language-model.test.js +15 -20
  5. package/dist/convert/__tests__/response.test.js +18 -16
  6. package/dist/convert/__tests__/stream.test.js +21 -20
  7. package/dist/convert/__tests__/tools.test.js +5 -5
  8. package/dist/convert/__tests__/ui-stream.test.js +11 -5
  9. package/dist/convert/response.d.ts +4 -4
  10. package/dist/convert/response.d.ts.map +1 -1
  11. package/dist/convert/response.js +1 -20
  12. package/dist/convert/stream.d.ts.map +1 -1
  13. package/dist/convert/stream.js +1 -7
  14. package/dist/convert/tools.d.ts +2 -2
  15. package/dist/convert/tools.d.ts.map +1 -1
  16. package/dist/convert/tools.js +2 -2
  17. package/dist/embedding-model.d.ts +3 -3
  18. package/dist/embedding-model.d.ts.map +1 -1
  19. package/dist/language-model.d.ts +7 -2
  20. package/dist/language-model.d.ts.map +1 -1
  21. package/dist/language-model.js +13 -5
  22. package/dist/oauth/openai.d.ts +8 -0
  23. package/dist/oauth/openai.d.ts.map +1 -0
  24. package/dist/oauth/openai.js +69 -0
  25. package/dist/oauth/types.d.ts +25 -0
  26. package/dist/oauth/types.d.ts.map +1 -0
  27. package/dist/oauth/types.js +1 -0
  28. package/dist/providers/anthropic.d.ts +25 -2
  29. package/dist/providers/anthropic.d.ts.map +1 -1
  30. package/dist/providers/anthropic.js +19 -1
  31. package/dist/providers/google.d.ts +4 -4
  32. package/dist/providers/google.d.ts.map +1 -1
  33. package/dist/providers/google.js +1 -1
  34. package/dist/providers/openai.d.ts +43 -3
  35. package/dist/providers/openai.d.ts.map +1 -1
  36. package/dist/providers/openai.js +40 -2
  37. package/package.json +9 -9
  38. package/src/__tests__/integration.test.ts +10 -11
  39. package/src/__tests__/language-model.test.ts +15 -20
  40. package/src/convert/__tests__/response.test.ts +23 -21
  41. package/src/convert/__tests__/stream.test.ts +21 -20
  42. package/src/convert/__tests__/tools.test.ts +6 -6
  43. package/src/convert/__tests__/ui-stream.test.ts +11 -5
  44. package/src/convert/response.ts +9 -30
  45. package/src/convert/stream.ts +1 -7
  46. package/src/convert/tools.ts +5 -5
  47. package/src/embedding-model.ts +3 -5
  48. package/src/language-model.ts +15 -5
  49. package/src/oauth/openai.ts +94 -0
  50. package/src/oauth/types.ts +25 -0
  51. package/src/providers/anthropic.ts +39 -19
  52. package/src/providers/google.ts +4 -37
  53. package/src/providers/openai.ts +68 -41
@@ -0,0 +1,69 @@
1
+ const TOKEN_URL = "https://auth.openai.com/oauth/token";
2
+ const CLIENT_ID = "app_EMoamEEZ73f0CkXaXp7hrann";
3
+ const CODEX_ENDPOINT = "https://chatgpt.com/backend-api/codex/responses";
4
+ /**
5
+ * Refresh OpenAI OAuth tokens.
6
+ */
7
+ async function refresh(creds) {
8
+ const res = await fetch(TOKEN_URL, {
9
+ method: "POST",
10
+ headers: { "Content-Type": "application/x-www-form-urlencoded" },
11
+ body: new URLSearchParams({
12
+ grant_type: "refresh_token",
13
+ refresh_token: creds.refreshToken,
14
+ client_id: CLIENT_ID,
15
+ }),
16
+ });
17
+ if (!res.ok) {
18
+ throw new Error(`Token refresh failed: ${res.status}`);
19
+ }
20
+ const data = (await res.json());
21
+ creds.accessToken = data.access_token;
22
+ creds.refreshToken = data.refresh_token;
23
+ creds.expiresAt = Date.now() + data.expires_in * 1000;
24
+ creds.onRefresh?.({
25
+ accessToken: creds.accessToken,
26
+ refreshToken: creds.refreshToken,
27
+ expiresAt: creds.expiresAt,
28
+ });
29
+ }
30
+ /**
31
+ * Create a fetch wrapper for OpenAI Codex OAuth.
32
+ *
33
+ * Redirects all requests to the Codex endpoint and adds OAuth headers.
34
+ */
35
+ export function createOAuthFetch(creds) {
36
+ return async (input, init) => {
37
+ // Refresh if expired (with 30s buffer)
38
+ if (Date.now() >= creds.expiresAt - 30_000) {
39
+ await refresh(creds);
40
+ }
41
+ const headers = new Headers(init?.headers);
42
+ headers.set("Authorization", `Bearer ${creds.accessToken}`);
43
+ if (creds.accountId) {
44
+ headers.set("ChatGPT-Account-Id", creds.accountId);
45
+ }
46
+ // Transform request body for Codex API
47
+ // Codex requires "instructions" field instead of developer/system role in input
48
+ let body = init?.body;
49
+ if (body && typeof body === "string") {
50
+ try {
51
+ const parsed = JSON.parse(body);
52
+ // Extract developer/system message as instructions
53
+ if (parsed.input && Array.isArray(parsed.input)) {
54
+ const devIdx = parsed.input.findIndex((m) => m.role === "developer" || m.role === "system");
55
+ if (devIdx !== -1) {
56
+ const devMsg = parsed.input[devIdx];
57
+ parsed.instructions = devMsg.content;
58
+ parsed.input.splice(devIdx, 1);
59
+ }
60
+ }
61
+ body = JSON.stringify(parsed);
62
+ }
63
+ catch {
64
+ // ignore parse errors
65
+ }
66
+ }
67
+ return fetch(CODEX_ENDPOINT, { ...init, headers, body });
68
+ };
69
+ }
@@ -0,0 +1,25 @@
1
+ /**
2
+ * Base OAuth credentials.
3
+ */
4
+ export interface OAuthCredentials {
5
+ /** Current access token */
6
+ accessToken: string;
7
+ /** Refresh token for obtaining new access tokens */
8
+ refreshToken: string;
9
+ /** Expiration timestamp in milliseconds */
10
+ expiresAt: number;
11
+ /** Called when tokens are refreshed - use to persist new tokens */
12
+ onRefresh?: (tokens: {
13
+ accessToken: string;
14
+ refreshToken: string;
15
+ expiresAt: number;
16
+ }) => void;
17
+ }
18
+ /**
19
+ * OpenAI OAuth credentials (ChatGPT Plus/Pro via Codex).
20
+ */
21
+ export interface OpenAIOAuthCredentials extends OAuthCredentials {
22
+ /** Account ID for org/team subscriptions */
23
+ accountId?: string;
24
+ }
25
+ //# sourceMappingURL=types.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../src/oauth/types.ts"],"names":[],"mappings":"AAAA;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAC/B,2BAA2B;IAC3B,WAAW,EAAE,MAAM,CAAC;IACpB,oDAAoD;IACpD,YAAY,EAAE,MAAM,CAAC;IACrB,2CAA2C;IAC3C,SAAS,EAAE,MAAM,CAAC;IAClB,mEAAmE;IACnE,SAAS,CAAC,EAAE,CAAC,MAAM,EAAE;QACnB,WAAW,EAAE,MAAM,CAAC;QACpB,YAAY,EAAE,MAAM,CAAC;QACrB,SAAS,EAAE,MAAM,CAAC;KACnB,KAAK,IAAI,CAAC;CACZ;AAED;;GAEG;AACH,MAAM,WAAW,sBAAuB,SAAQ,gBAAgB;IAC9D,4CAA4C;IAC5C,SAAS,CAAC,EAAE,MAAM,CAAC;CACpB"}
@@ -0,0 +1 @@
1
+ export {};
@@ -1,10 +1,33 @@
1
+ import { anthropic as _anthropic } from "@ai-sdk/anthropic";
1
2
  import { AISDKLanguageModel } from "../language-model.js";
2
3
  /**
3
- * Anthropic model IDs.
4
+ * Anthropic model IDs (derived from @ai-sdk/anthropic).
4
5
  */
5
- export type AnthropicModelId = "claude-haiku-4-5" | "claude-haiku-4-5-20251001" | "claude-sonnet-4-5" | "claude-sonnet-4-5-20250929" | "claude-opus-4-1" | "claude-opus-4-0" | "claude-sonnet-4-0" | "claude-opus-4-1-20250805" | "claude-opus-4-20250514" | "claude-sonnet-4-20250514" | "claude-3-7-sonnet-latest" | "claude-3-7-sonnet-20250219" | "claude-3-5-haiku-latest" | "claude-3-5-haiku-20241022" | "claude-3-haiku-20240307" | (string & {});
6
+ export type AnthropicModelId = Parameters<typeof _anthropic>[0];
7
+ /**
8
+ * Options for creating a custom Anthropic provider.
9
+ */
10
+ export interface AnthropicProviderOptions {
11
+ /** API key for authentication */
12
+ apiKey?: string;
13
+ /** Custom base URL */
14
+ baseURL?: string;
15
+ /** Custom headers */
16
+ headers?: Record<string, string>;
17
+ }
18
+ /**
19
+ * Create a custom Anthropic provider with explicit credentials.
20
+ *
21
+ * @example
22
+ * ```ts
23
+ * const anthropic = createAnthropic({ apiKey: "sk-..." });
24
+ * const model = anthropic("claude-sonnet-4-5");
25
+ * ```
26
+ */
27
+ export declare function createAnthropic(options?: AnthropicProviderOptions): (modelId: AnthropicModelId) => AISDKLanguageModel;
6
28
  /**
7
29
  * Create a kernl-compatible Anthropic language model.
30
+ * Uses ANTHROPIC_API_KEY environment variable.
8
31
  *
9
32
  * @example
10
33
  * ```ts
@@ -1 +1 @@
1
- {"version":3,"file":"anthropic.d.ts","sourceRoot":"","sources":["../../src/providers/anthropic.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,kBAAkB,EAAE,MAAM,mBAAmB,CAAC;AAEvD;;GAEG;AACH,MAAM,MAAM,gBAAgB,GACxB,kBAAkB,GAClB,2BAA2B,GAC3B,mBAAmB,GACnB,4BAA4B,GAC5B,iBAAiB,GACjB,iBAAiB,GACjB,mBAAmB,GACnB,0BAA0B,GAC1B,wBAAwB,GACxB,0BAA0B,GAC1B,0BAA0B,GAC1B,4BAA4B,GAC5B,yBAAyB,GACzB,2BAA2B,GAC3B,yBAAyB,GACzB,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC;AAElB;;;;;;;;;;GAUG;AACH,wBAAgB,SAAS,CAAC,OAAO,EAAE,gBAAgB,sBAGlD"}
1
+ {"version":3,"file":"anthropic.d.ts","sourceRoot":"","sources":["../../src/providers/anthropic.ts"],"names":[],"mappings":"AAAA,OAAO,EACL,SAAS,IAAI,UAAU,EAExB,MAAM,mBAAmB,CAAC;AAC3B,OAAO,EAAE,kBAAkB,EAAE,MAAM,mBAAmB,CAAC;AAEvD;;GAEG;AACH,MAAM,MAAM,gBAAgB,GAAG,UAAU,CAAC,OAAO,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC;AAEhE;;GAEG;AACH,MAAM,WAAW,wBAAwB;IACvC,iCAAiC;IACjC,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,sBAAsB;IACtB,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,qBAAqB;IACrB,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;CAClC;AAED;;;;;;;;GAQG;AACH,wBAAgB,eAAe,CAAC,OAAO,GAAE,wBAA6B,IAO5D,SAAS,gBAAgB,wBAElC;AAED;;;;;;;;;;;GAWG;AACH,wBAAgB,SAAS,CAAC,OAAO,EAAE,gBAAgB,sBAGlD"}
@@ -1,7 +1,25 @@
1
- import { anthropic as _anthropic } from "@ai-sdk/anthropic";
1
+ import { anthropic as _anthropic, createAnthropic as _createAnthropic, } from "@ai-sdk/anthropic";
2
2
  import { AISDKLanguageModel } from "../language-model.js";
3
+ /**
4
+ * Create a custom Anthropic provider with explicit credentials.
5
+ *
6
+ * @example
7
+ * ```ts
8
+ * const anthropic = createAnthropic({ apiKey: "sk-..." });
9
+ * const model = anthropic("claude-sonnet-4-5");
10
+ * ```
11
+ */
12
+ export function createAnthropic(options = {}) {
13
+ const provider = _createAnthropic({
14
+ apiKey: options.apiKey,
15
+ baseURL: options.baseURL,
16
+ headers: options.headers,
17
+ });
18
+ return (modelId) => new AISDKLanguageModel(provider(modelId));
19
+ }
3
20
  /**
4
21
  * Create a kernl-compatible Anthropic language model.
22
+ * Uses ANTHROPIC_API_KEY environment variable.
5
23
  *
6
24
  * @example
7
25
  * ```ts
@@ -1,8 +1,9 @@
1
+ import { google as _google } from "@ai-sdk/google";
1
2
  import { AISDKLanguageModel } from "../language-model.js";
2
3
  /**
3
- * Google Generative AI model IDs.
4
+ * Google model IDs (derived from @ai-sdk/google).
4
5
  */
5
- type GoogleGenerativeAIModelId = "gemini-1.5-flash" | "gemini-1.5-flash-latest" | "gemini-1.5-flash-001" | "gemini-1.5-flash-002" | "gemini-1.5-flash-8b" | "gemini-1.5-flash-8b-latest" | "gemini-1.5-flash-8b-001" | "gemini-1.5-pro" | "gemini-1.5-pro-latest" | "gemini-1.5-pro-001" | "gemini-1.5-pro-002" | "gemini-2.0-flash" | "gemini-2.0-flash-001" | "gemini-2.0-flash-live-001" | "gemini-2.0-flash-lite" | "gemini-2.0-pro-exp-02-05" | "gemini-2.0-flash-thinking-exp-01-21" | "gemini-2.0-flash-exp" | "gemini-2.5-pro" | "gemini-2.5-flash" | "gemini-2.5-flash-image-preview" | "gemini-2.5-flash-lite" | "gemini-2.5-flash-lite-preview-09-2025" | "gemini-2.5-flash-preview-04-17" | "gemini-2.5-flash-preview-09-2025" | "gemini-pro-latest" | "gemini-flash-latest" | "gemini-flash-lite-latest" | "gemini-2.5-pro-exp-03-25" | "gemini-exp-1206" | "gemma-3-12b-it" | "gemma-3-27b-it" | (string & {});
6
+ export type GoogleModelId = Parameters<typeof _google>[0];
6
7
  /**
7
8
  * Create a kernl-compatible Google Generative AI language model.
8
9
  *
@@ -14,6 +15,5 @@ type GoogleGenerativeAIModelId = "gemini-1.5-flash" | "gemini-1.5-flash-latest"
14
15
  * const response = await gemini.generate([...], {});
15
16
  * ```
16
17
  */
17
- export declare function google(modelId: GoogleGenerativeAIModelId): AISDKLanguageModel;
18
- export {};
18
+ export declare function google(modelId: GoogleModelId): AISDKLanguageModel;
19
19
  //# sourceMappingURL=google.d.ts.map
@@ -1 +1 @@
1
- {"version":3,"file":"google.d.ts","sourceRoot":"","sources":["../../src/providers/google.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,kBAAkB,EAAE,MAAM,mBAAmB,CAAC;AAIvD;;GAEG;AACH,KAAK,yBAAyB,GAC1B,kBAAkB,GAClB,yBAAyB,GACzB,sBAAsB,GACtB,sBAAsB,GACtB,qBAAqB,GACrB,4BAA4B,GAC5B,yBAAyB,GACzB,gBAAgB,GAChB,uBAAuB,GACvB,oBAAoB,GACpB,oBAAoB,GACpB,kBAAkB,GAClB,sBAAsB,GACtB,2BAA2B,GAC3B,uBAAuB,GACvB,0BAA0B,GAC1B,qCAAqC,GACrC,sBAAsB,GACtB,gBAAgB,GAChB,kBAAkB,GAClB,gCAAgC,GAChC,uBAAuB,GACvB,uCAAuC,GACvC,gCAAgC,GAChC,kCAAkC,GAClC,mBAAmB,GACnB,qBAAqB,GACrB,0BAA0B,GAC1B,0BAA0B,GAC1B,iBAAiB,GACjB,gBAAgB,GAChB,gBAAgB,GAChB,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC;AAElB;;;;;;;;;;GAUG;AACH,wBAAgB,MAAM,CAAC,OAAO,EAAE,yBAAyB,sBAGxD"}
1
+ {"version":3,"file":"google.d.ts","sourceRoot":"","sources":["../../src/providers/google.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,MAAM,IAAI,OAAO,EAAE,MAAM,gBAAgB,CAAC;AAEnD,OAAO,EAAE,kBAAkB,EAAE,MAAM,mBAAmB,CAAC;AAGvD;;GAEG;AACH,MAAM,MAAM,aAAa,GAAG,UAAU,CAAC,OAAO,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC;AAE1D;;;;;;;;;;GAUG;AACH,wBAAgB,MAAM,CAAC,OAAO,EAAE,aAAa,sBAG5C"}
@@ -1,7 +1,7 @@
1
1
  import { google as _google } from "@ai-sdk/google";
2
+ import { registerEmbeddingProvider } from "@kernl-sdk/retrieval";
2
3
  import { AISDKLanguageModel } from "../language-model.js";
3
4
  import { AISDKEmbeddingModel } from "../embedding-model.js";
4
- import { registerEmbeddingProvider } from "@kernl-sdk/retrieval";
5
5
  /**
6
6
  * Create a kernl-compatible Google Generative AI language model.
7
7
  *
@@ -1,10 +1,50 @@
1
+ import { openai as _openai } from "@ai-sdk/openai";
1
2
  import { AISDKLanguageModel } from "../language-model.js";
3
+ import type { OpenAIOAuthCredentials } from "../oauth/types.js";
2
4
  /**
3
- * OpenAI model IDs.
5
+ * OpenAI model IDs (derived from @ai-sdk/openai).
4
6
  */
5
- type OpenAIModelId = "chatgpt-4o-latest" | "gpt-3.5-turbo-0125" | "gpt-3.5-turbo-1106" | "gpt-3.5-turbo" | "gpt-4-0613" | "gpt-4-turbo-2024-04-09" | "gpt-4-turbo" | "gpt-4.1-2025-04-14" | "gpt-4.1-mini-2025-04-14" | "gpt-4.1-mini" | "gpt-4.1-nano-2025-04-14" | "gpt-4.1-nano" | "gpt-4.1" | "gpt-4" | "gpt-4o-2024-05-13" | "gpt-4o-2024-08-06" | "gpt-4o-2024-11-20" | "gpt-4o-mini-2024-07-18" | "gpt-4o-mini" | "gpt-4o" | "gpt-5-2025-08-07" | "gpt-5-chat-latest" | "gpt-5-codex" | "gpt-5-mini-2025-08-07" | "gpt-5-mini" | "gpt-5-nano-2025-08-07" | "gpt-5-nano" | "gpt-5-pro-2025-10-06" | "gpt-5-pro" | "gpt-5" | "o1-2024-12-17" | "o1" | "o3-2025-04-16" | "o3-mini-2025-01-31" | "o3-mini" | "o3" | (string & {});
7
+ export type OpenAIModelId = Parameters<typeof _openai>[0];
8
+ /**
9
+ * Options for creating a custom OpenAI provider.
10
+ */
11
+ export interface OpenAIProviderOptions {
12
+ /** API key for standard authentication */
13
+ apiKey?: string;
14
+ /** OAuth credentials for ChatGPT Plus/Pro (Codex) authentication */
15
+ oauth?: OpenAIOAuthCredentials;
16
+ /** Custom base URL (ignored for OAuth - uses Codex endpoint) */
17
+ baseURL?: string;
18
+ /** Custom headers */
19
+ headers?: Record<string, string>;
20
+ }
21
+ /**
22
+ * Create a custom OpenAI provider with explicit credentials.
23
+ *
24
+ * @example API key auth
25
+ * ```ts
26
+ * const openai = createOpenAI({ apiKey: "sk-..." });
27
+ * const model = openai("gpt-4o");
28
+ * ```
29
+ *
30
+ * @example OAuth auth (ChatGPT Plus/Pro via Codex)
31
+ * ```ts
32
+ * const openai = createOpenAI({
33
+ * oauth: {
34
+ * accessToken: "...",
35
+ * refreshToken: "...",
36
+ * expiresAt: Date.now() + 3600000,
37
+ * accountId: "...", // for org subscriptions
38
+ * onRefresh: (tokens) => saveTokens(tokens),
39
+ * }
40
+ * });
41
+ * const model = openai("gpt-4o");
42
+ * ```
43
+ */
44
+ export declare function createOpenAI(options?: OpenAIProviderOptions): (modelId: OpenAIModelId) => AISDKLanguageModel;
6
45
  /**
7
46
  * Create a kernl-compatible OpenAI language model.
47
+ * Uses OPENAI_API_KEY environment variable.
8
48
  *
9
49
  * @example
10
50
  * ```ts
@@ -15,5 +55,5 @@ type OpenAIModelId = "chatgpt-4o-latest" | "gpt-3.5-turbo-0125" | "gpt-3.5-turbo
15
55
  * ```
16
56
  */
17
57
  export declare function openai(modelId: OpenAIModelId): AISDKLanguageModel;
18
- export {};
58
+ export type { OpenAIOAuthCredentials } from "../oauth/types.js";
19
59
  //# sourceMappingURL=openai.d.ts.map
@@ -1 +1 @@
1
- {"version":3,"file":"openai.d.ts","sourceRoot":"","sources":["../../src/providers/openai.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,kBAAkB,EAAE,MAAM,mBAAmB,CAAC;AAIvD;;GAEG;AACH,KAAK,aAAa,GACd,mBAAmB,GACnB,oBAAoB,GACpB,oBAAoB,GACpB,eAAe,GACf,YAAY,GACZ,wBAAwB,GACxB,aAAa,GACb,oBAAoB,GACpB,yBAAyB,GACzB,cAAc,GACd,yBAAyB,GACzB,cAAc,GACd,SAAS,GACT,OAAO,GACP,mBAAmB,GACnB,mBAAmB,GACnB,mBAAmB,GACnB,wBAAwB,GACxB,aAAa,GACb,QAAQ,GACR,kBAAkB,GAClB,mBAAmB,GACnB,aAAa,GACb,uBAAuB,GACvB,YAAY,GACZ,uBAAuB,GACvB,YAAY,GACZ,sBAAsB,GACtB,WAAW,GACX,OAAO,GACP,eAAe,GACf,IAAI,GACJ,eAAe,GACf,oBAAoB,GACpB,SAAS,GACT,IAAI,GACJ,CAAC,MAAM,GAAG,EAAE,CAAC,CAAC;AAElB;;;;;;;;;;GAUG;AACH,wBAAgB,MAAM,CAAC,OAAO,EAAE,aAAa,sBAG5C"}
1
+ {"version":3,"file":"openai.d.ts","sourceRoot":"","sources":["../../src/providers/openai.ts"],"names":[],"mappings":"AAAA,OAAO,EACL,MAAM,IAAI,OAAO,EAElB,MAAM,gBAAgB,CAAC;AAGxB,OAAO,EAAE,kBAAkB,EAAE,MAAM,mBAAmB,CAAC;AAGvD,OAAO,KAAK,EAAE,sBAAsB,EAAE,MAAM,gBAAgB,CAAC;AAE7D;;GAEG;AACH,MAAM,MAAM,aAAa,GAAG,UAAU,CAAC,OAAO,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC;AAE1D;;GAEG;AACH,MAAM,WAAW,qBAAqB;IACpC,0CAA0C;IAC1C,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,oEAAoE;IACpE,KAAK,CAAC,EAAE,sBAAsB,CAAC;IAC/B,gEAAgE;IAChE,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,qBAAqB;IACrB,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;CAClC;AAED;;;;;;;;;;;;;;;;;;;;;;GAsBG;AACH,wBAAgB,YAAY,CAAC,OAAO,GAAE,qBAA0B,IAatD,SAAS,aAAa,wBAE/B;AAED;;;;;;;;;;;GAWG;AACH,wBAAgB,MAAM,CAAC,OAAO,EAAE,aAAa,sBAG5C;AAGD,YAAY,EAAE,sBAAsB,EAAE,MAAM,gBAAgB,CAAC"}
@@ -1,9 +1,47 @@
1
- import { openai as _openai } from "@ai-sdk/openai";
1
+ import { openai as _openai, createOpenAI as _createOpenAI, } from "@ai-sdk/openai";
2
+ import { registerEmbeddingProvider } from "@kernl-sdk/retrieval";
2
3
  import { AISDKLanguageModel } from "../language-model.js";
3
4
  import { AISDKEmbeddingModel } from "../embedding-model.js";
4
- import { registerEmbeddingProvider } from "@kernl-sdk/retrieval";
5
+ import { createOAuthFetch } from "../oauth/openai.js";
6
+ /**
7
+ * Create a custom OpenAI provider with explicit credentials.
8
+ *
9
+ * @example API key auth
10
+ * ```ts
11
+ * const openai = createOpenAI({ apiKey: "sk-..." });
12
+ * const model = openai("gpt-4o");
13
+ * ```
14
+ *
15
+ * @example OAuth auth (ChatGPT Plus/Pro via Codex)
16
+ * ```ts
17
+ * const openai = createOpenAI({
18
+ * oauth: {
19
+ * accessToken: "...",
20
+ * refreshToken: "...",
21
+ * expiresAt: Date.now() + 3600000,
22
+ * accountId: "...", // for org subscriptions
23
+ * onRefresh: (tokens) => saveTokens(tokens),
24
+ * }
25
+ * });
26
+ * const model = openai("gpt-4o");
27
+ * ```
28
+ */
29
+ export function createOpenAI(options = {}) {
30
+ const provider = _createOpenAI({
31
+ apiKey: options.oauth ? undefined : options.apiKey,
32
+ baseURL: options.oauth ? undefined : options.baseURL,
33
+ headers: options.headers,
34
+ fetch: options.oauth ? createOAuthFetch(options.oauth) : undefined,
35
+ });
36
+ // OAuth requires store: false - Codex endpoint doesn't persist items
37
+ const settings = options.oauth
38
+ ? { providerOptions: { openai: { store: false } } }
39
+ : undefined;
40
+ return (modelId) => new AISDKLanguageModel(provider(modelId), settings);
41
+ }
5
42
  /**
6
43
  * Create a kernl-compatible OpenAI language model.
44
+ * Uses OPENAI_API_KEY environment variable.
7
45
  *
8
46
  * @example
9
47
  * ```ts
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@kernl-sdk/ai",
3
- "version": "0.4.3",
3
+ "version": "0.4.5",
4
4
  "description": "Vercel AI SDK adapter for kernl",
5
5
  "keywords": [
6
6
  "kernl",
@@ -43,7 +43,7 @@
43
43
  }
44
44
  },
45
45
  "peerDependencies": {
46
- "@ai-sdk/provider": "^3.0.0-beta.15"
46
+ "@ai-sdk/provider": "^3.0.3"
47
47
  },
48
48
  "peerDependenciesMeta": {
49
49
  "@ai-sdk/anthropic": {
@@ -57,10 +57,10 @@
57
57
  }
58
58
  },
59
59
  "devDependencies": {
60
- "@ai-sdk/anthropic": "3.0.0-beta.53",
61
- "@ai-sdk/google": "3.0.0-beta.43",
62
- "@ai-sdk/openai": "3.0.0-beta.57",
63
- "@ai-sdk/provider": "3.0.0-beta.15",
60
+ "@ai-sdk/anthropic": "^3.0.14",
61
+ "@ai-sdk/google": "^3.0.9",
62
+ "@ai-sdk/openai": "^3.0.11",
63
+ "@ai-sdk/provider": "^3.0.3",
64
64
  "@types/node": "^24.10.0",
65
65
  "ai": "^5.0.93",
66
66
  "tsc-alias": "^1.8.10",
@@ -68,10 +68,10 @@
68
68
  "vitest": "^4.0.8"
69
69
  },
70
70
  "dependencies": {
71
- "@kernl-sdk/protocol": "0.5.0",
71
+ "@kernl-sdk/protocol": "0.5.1",
72
+ "@kernl-sdk/retrieval": "0.1.10",
72
73
  "@kernl-sdk/shared": "^0.4.0",
73
- "@kernl-sdk/retrieval": "0.1.9",
74
- "kernl": "0.12.2"
74
+ "kernl": "0.12.3"
75
75
  },
76
76
  "scripts": {
77
77
  "build": "tsc && tsc-alias --resolve-full-paths",
@@ -64,9 +64,8 @@ describe.skipIf(SKIP_OPENAI_TESTS)("AISDKLanguageModel - OpenAI", () => {
64
64
  expect(response.content).toBeDefined();
65
65
  expect(response.content.length).toBeGreaterThan(0);
66
66
  expect(response.usage).toBeDefined();
67
- expect(response.usage.totalTokens).toBeGreaterThan(0);
68
- expect(response.usage.inputTokens).toBeGreaterThan(0);
69
- expect(response.usage.outputTokens).toBeGreaterThan(0);
67
+ expect(response.usage.inputTokens.total).toBeGreaterThan(0);
68
+ expect(response.usage.outputTokens.total).toBeGreaterThan(0);
70
69
 
71
70
  // Should have at least one message
72
71
  const messages = response.content.filter(
@@ -103,7 +102,7 @@ describe.skipIf(SKIP_OPENAI_TESTS)("AISDKLanguageModel - OpenAI", () => {
103
102
  });
104
103
 
105
104
  expect(response.content).toBeDefined();
106
- expect(response.usage.totalTokens).toBeGreaterThan(0);
105
+ expect(response.usage.inputTokens.total).toBeGreaterThan(0);
107
106
  });
108
107
 
109
108
  it("should handle multi-turn conversations", async () => {
@@ -135,7 +134,7 @@ describe.skipIf(SKIP_OPENAI_TESTS)("AISDKLanguageModel - OpenAI", () => {
135
134
  });
136
135
 
137
136
  expect(response.content).toBeDefined();
138
- expect(response.usage.totalTokens).toBeGreaterThan(0);
137
+ expect(response.usage.inputTokens.total).toBeGreaterThan(0);
139
138
 
140
139
  // Check that it remembers the name (should mention Alice)
141
140
  const assistantMessages = response.content.filter(
@@ -161,7 +160,7 @@ describe.skipIf(SKIP_OPENAI_TESTS)("AISDKLanguageModel - OpenAI", () => {
161
160
  });
162
161
 
163
162
  expect(response.content).toBeDefined();
164
- expect(response.usage.totalTokens).toBeGreaterThan(0);
163
+ expect(response.usage.inputTokens.total).toBeGreaterThan(0);
165
164
  });
166
165
 
167
166
  it("should respect maxTokens setting", async () => {
@@ -181,8 +180,8 @@ describe.skipIf(SKIP_OPENAI_TESTS)("AISDKLanguageModel - OpenAI", () => {
181
180
  });
182
181
 
183
182
  expect(response.content).toBeDefined();
184
- expect(response.usage.outputTokens).toBeDefined();
185
- expect(response.usage.outputTokens).toBeLessThanOrEqual(20);
183
+ expect(response.usage.outputTokens.total).toBeDefined();
184
+ expect(response.usage.outputTokens.total).toBeLessThanOrEqual(20);
186
185
  });
187
186
  });
188
187
 
@@ -216,7 +215,7 @@ describe.skipIf(SKIP_OPENAI_TESTS)("AISDKLanguageModel - OpenAI", () => {
216
215
  // Should have usage information
217
216
  const finishEvent = finishEvents[0] as any;
218
217
  expect(finishEvent.usage).toBeDefined();
219
- expect(finishEvent.usage.totalTokens).toBeGreaterThan(0);
218
+ expect(finishEvent.usage.inputTokens.total).toBeGreaterThan(0);
220
219
  });
221
220
 
222
221
  it("should stream text deltas", async () => {
@@ -1141,7 +1140,7 @@ describe.skipIf(SKIP_GOOGLE_TESTS)("AISDKLanguageModel - Google", () => {
1141
1140
  expect(response.content).toBeDefined();
1142
1141
  expect(response.content.length).toBeGreaterThan(0);
1143
1142
  expect(response.usage).toBeDefined();
1144
- expect(response.usage.totalTokens).toBeGreaterThan(0);
1143
+ expect(response.usage.inputTokens.total).toBeGreaterThan(0);
1145
1144
 
1146
1145
  const messages = response.content.filter(
1147
1146
  (item) => item.kind === "message",
@@ -1180,7 +1179,7 @@ describe.skipIf(SKIP_GOOGLE_TESTS)("AISDKLanguageModel - Google", () => {
1180
1179
  // Should have usage information
1181
1180
  const finishEvent = finishEvents[0] as any;
1182
1181
  expect(finishEvent.usage).toBeDefined();
1183
- expect(finishEvent.usage.totalTokens).toBeGreaterThan(0);
1182
+ expect(finishEvent.usage.inputTokens.total).toBeGreaterThan(0);
1184
1183
  });
1185
1184
  });
1186
1185
 
@@ -51,11 +51,10 @@ describe("AISDKLanguageModel", () => {
51
51
  },
52
52
  {
53
53
  type: "finish",
54
- finishReason: "stop",
54
+ finishReason: { unified: "stop", raw: "stop" },
55
55
  usage: {
56
- inputTokens: 5,
57
- outputTokens: 10,
58
- totalTokens: 15,
56
+ inputTokens: { total: 5, noCache: 5, cacheRead: undefined, cacheWrite: undefined },
57
+ outputTokens: { total: 10, text: 10, reasoning: undefined },
59
58
  },
60
59
  providerMetadata: undefined,
61
60
  },
@@ -163,11 +162,10 @@ describe("AISDKLanguageModel", () => {
163
162
  },
164
163
  {
165
164
  type: "finish",
166
- finishReason: "stop",
165
+ finishReason: { unified: "stop", raw: "stop" },
167
166
  usage: {
168
- inputTokens: 5,
169
- outputTokens: 20,
170
- totalTokens: 25,
167
+ inputTokens: { total: 5, noCache: 5, cacheRead: undefined, cacheWrite: undefined },
168
+ outputTokens: { total: 20, text: 20, reasoning: undefined },
171
169
  },
172
170
  providerMetadata: undefined,
173
171
  },
@@ -272,11 +270,10 @@ describe("AISDKLanguageModel", () => {
272
270
  },
273
271
  {
274
272
  type: "finish",
275
- finishReason: "stop",
273
+ finishReason: { unified: "stop", raw: "stop" },
276
274
  usage: {
277
- inputTokens: 5,
278
- outputTokens: 10,
279
- totalTokens: 15,
275
+ inputTokens: { total: 5, noCache: 5, cacheRead: undefined, cacheWrite: undefined },
276
+ outputTokens: { total: 10, text: 10, reasoning: undefined },
280
277
  },
281
278
  providerMetadata: undefined,
282
279
  },
@@ -353,11 +350,10 @@ describe("AISDKLanguageModel", () => {
353
350
  },
354
351
  {
355
352
  type: "finish",
356
- finishReason: "stop",
353
+ finishReason: { unified: "stop", raw: "stop" },
357
354
  usage: {
358
- inputTokens: 5,
359
- outputTokens: 10,
360
- totalTokens: 15,
355
+ inputTokens: { total: 5, noCache: 5, cacheRead: undefined, cacheWrite: undefined },
356
+ outputTokens: { total: 10, text: 10, reasoning: undefined },
361
357
  },
362
358
  providerMetadata: undefined,
363
359
  },
@@ -413,11 +409,10 @@ describe("AISDKLanguageModel", () => {
413
409
  },
414
410
  {
415
411
  type: "finish",
416
- finishReason: "tool-calls",
412
+ finishReason: { unified: "tool-calls", raw: "tool_calls" },
417
413
  usage: {
418
- inputTokens: 5,
419
- outputTokens: 10,
420
- totalTokens: 15,
414
+ inputTokens: { total: 5, noCache: 5, cacheRead: undefined, cacheWrite: undefined },
415
+ outputTokens: { total: 10, text: 10, reasoning: undefined },
421
416
  },
422
417
  providerMetadata: undefined,
423
418
  },
@@ -1,51 +1,53 @@
1
1
  import { describe, it, expect } from "vitest";
2
- import type { LanguageModelV3CallWarning } from "@ai-sdk/provider";
2
+ import type { SharedV3Warning } from "@ai-sdk/provider";
3
3
 
4
4
  import { WARNING } from "../response";
5
5
 
6
6
  describe("WARNING codec", () => {
7
7
  describe("decode", () => {
8
- it("should decode unsupported-setting warning", () => {
9
- const aiWarning: LanguageModelV3CallWarning = {
10
- type: "unsupported-setting",
11
- setting: "someUnsupportedSetting",
12
- details: "This setting is not supported by the provider",
8
+ it("should decode unsupported warning", () => {
9
+ const aiWarning: SharedV3Warning = {
10
+ type: "unsupported",
11
+ feature: "someUnsupportedFeature",
12
+ details: "This feature is not supported by the provider",
13
13
  };
14
14
 
15
15
  const result = WARNING.decode(aiWarning);
16
16
 
17
17
  expect(result).toEqual({
18
- type: "unsupported-setting",
19
- setting: "someUnsupportedSetting",
20
- details: "This setting is not supported by the provider",
18
+ type: "unsupported",
19
+ feature: "someUnsupportedFeature",
20
+ details: "This feature is not supported by the provider",
21
21
  });
22
22
  });
23
23
 
24
- it("should decode other warning", () => {
25
- const aiWarning: LanguageModelV3CallWarning = {
26
- type: "other",
27
- message: "Some custom warning message",
24
+ it("should decode compatibility warning", () => {
25
+ const aiWarning: SharedV3Warning = {
26
+ type: "compatibility",
27
+ feature: "someFeature",
28
+ details: "Running in compatibility mode",
28
29
  };
29
30
 
30
31
  const result = WARNING.decode(aiWarning);
31
32
 
32
33
  expect(result).toEqual({
33
- type: "other",
34
- message: "Some custom warning message",
34
+ type: "compatibility",
35
+ feature: "someFeature",
36
+ details: "Running in compatibility mode",
35
37
  });
36
38
  });
37
39
 
38
- it("should handle unknown warning type", () => {
39
- const aiWarning = {
40
- type: "unknown-type",
41
- someField: "value",
42
- } as any;
40
+ it("should decode other warning", () => {
41
+ const aiWarning: SharedV3Warning = {
42
+ type: "other",
43
+ message: "Some custom warning message",
44
+ };
43
45
 
44
46
  const result = WARNING.decode(aiWarning);
45
47
 
46
48
  expect(result).toEqual({
47
49
  type: "other",
48
- message: "Unknown warning type",
50
+ message: "Some custom warning message",
49
51
  });
50
52
  });
51
53
  });