@kernl-sdk/ai 0.4.3 → 0.4.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/.turbo/turbo-build.log +1 -1
  2. package/CHANGELOG.md +24 -0
  3. package/dist/__tests__/integration.test.js +10 -11
  4. package/dist/__tests__/language-model.test.js +15 -20
  5. package/dist/convert/__tests__/response.test.js +18 -16
  6. package/dist/convert/__tests__/stream.test.js +21 -20
  7. package/dist/convert/__tests__/tools.test.js +5 -5
  8. package/dist/convert/__tests__/ui-stream.test.js +11 -5
  9. package/dist/convert/response.d.ts +4 -4
  10. package/dist/convert/response.d.ts.map +1 -1
  11. package/dist/convert/response.js +1 -20
  12. package/dist/convert/stream.d.ts.map +1 -1
  13. package/dist/convert/stream.js +1 -7
  14. package/dist/convert/tools.d.ts +2 -2
  15. package/dist/convert/tools.d.ts.map +1 -1
  16. package/dist/convert/tools.js +2 -2
  17. package/dist/embedding-model.d.ts +3 -3
  18. package/dist/embedding-model.d.ts.map +1 -1
  19. package/dist/language-model.d.ts +7 -2
  20. package/dist/language-model.d.ts.map +1 -1
  21. package/dist/language-model.js +13 -5
  22. package/dist/oauth/openai.d.ts +8 -0
  23. package/dist/oauth/openai.d.ts.map +1 -0
  24. package/dist/oauth/openai.js +69 -0
  25. package/dist/oauth/types.d.ts +25 -0
  26. package/dist/oauth/types.d.ts.map +1 -0
  27. package/dist/oauth/types.js +1 -0
  28. package/dist/providers/anthropic.d.ts +25 -2
  29. package/dist/providers/anthropic.d.ts.map +1 -1
  30. package/dist/providers/anthropic.js +19 -1
  31. package/dist/providers/google.d.ts +4 -4
  32. package/dist/providers/google.d.ts.map +1 -1
  33. package/dist/providers/google.js +1 -1
  34. package/dist/providers/openai.d.ts +43 -3
  35. package/dist/providers/openai.d.ts.map +1 -1
  36. package/dist/providers/openai.js +40 -2
  37. package/package.json +9 -9
  38. package/src/__tests__/integration.test.ts +10 -11
  39. package/src/__tests__/language-model.test.ts +15 -20
  40. package/src/convert/__tests__/response.test.ts +23 -21
  41. package/src/convert/__tests__/stream.test.ts +21 -20
  42. package/src/convert/__tests__/tools.test.ts +6 -6
  43. package/src/convert/__tests__/ui-stream.test.ts +11 -5
  44. package/src/convert/response.ts +9 -30
  45. package/src/convert/stream.ts +1 -7
  46. package/src/convert/tools.ts +5 -5
  47. package/src/embedding-model.ts +3 -5
  48. package/src/language-model.ts +15 -5
  49. package/src/oauth/openai.ts +94 -0
  50. package/src/oauth/types.ts +25 -0
  51. package/src/providers/anthropic.ts +39 -19
  52. package/src/providers/google.ts +4 -37
  53. package/src/providers/openai.ts +68 -41
@@ -259,8 +259,8 @@ describe("STREAM_PART codec", () => {
259
259
  type: "stream-start",
260
260
  warnings: [
261
261
  {
262
- type: "unsupported-setting",
263
- setting: "topK",
262
+ type: "unsupported",
263
+ feature: "topK",
264
264
  },
265
265
  ],
266
266
  };
@@ -271,9 +271,8 @@ describe("STREAM_PART codec", () => {
271
271
  kind: "stream.start",
272
272
  warnings: [
273
273
  {
274
- type: "unsupported-setting",
275
- setting: "topK",
276
- details: undefined,
274
+ type: "unsupported",
275
+ feature: "topK",
277
276
  },
278
277
  ],
279
278
  });
@@ -282,11 +281,10 @@ describe("STREAM_PART codec", () => {
282
281
  it("should decode finish event", () => {
283
282
  const part: LanguageModelV3StreamPart = {
284
283
  type: "finish",
285
- finishReason: "stop",
284
+ finishReason: { unified: "stop", raw: "stop" },
286
285
  usage: {
287
- inputTokens: 10,
288
- outputTokens: 20,
289
- totalTokens: 30,
286
+ inputTokens: { total: 10, noCache: 8, cacheRead: 2, cacheWrite: undefined },
287
+ outputTokens: { total: 20, text: 18, reasoning: 2 },
290
288
  },
291
289
  providerMetadata: undefined,
292
290
  };
@@ -295,13 +293,10 @@ describe("STREAM_PART codec", () => {
295
293
 
296
294
  expect(result).toEqual({
297
295
  kind: "finish",
298
- finishReason: "stop",
296
+ finishReason: { unified: "stop", raw: "stop" },
299
297
  usage: {
300
- inputTokens: 10,
301
- outputTokens: 20,
302
- totalTokens: 30,
303
- reasoningTokens: undefined,
304
- cachedInputTokens: undefined,
298
+ inputTokens: { total: 10, noCache: 8, cacheRead: 2, cacheWrite: undefined },
299
+ outputTokens: { total: 20, text: 18, reasoning: 2 },
305
300
  },
306
301
  providerMetadata: undefined,
307
302
  });
@@ -405,8 +400,11 @@ describe("convertStream", () => {
405
400
  { type: "text-end", id: "text-1", providerMetadata: undefined },
406
401
  {
407
402
  type: "finish",
408
- finishReason: "stop",
409
- usage: { inputTokens: 5, outputTokens: 10, totalTokens: 15 },
403
+ finishReason: { unified: "stop", raw: "stop" },
404
+ usage: {
405
+ inputTokens: { total: 5, noCache: 5, cacheRead: undefined, cacheWrite: undefined },
406
+ outputTokens: { total: 10, text: 10, reasoning: undefined },
407
+ },
410
408
  providerMetadata: undefined,
411
409
  },
412
410
  ];
@@ -429,7 +427,7 @@ describe("convertStream", () => {
429
427
  expect(events[0]).toMatchObject({ kind: "text.start" });
430
428
  expect(events[1]).toMatchObject({ kind: "text.delta", text: "Hello" });
431
429
  expect(events[2]).toMatchObject({ kind: "text.end" });
432
- expect(events[3]).toMatchObject({ kind: "finish", finishReason: "stop" });
430
+ expect(events[3]).toMatchObject({ kind: "finish", finishReason: { unified: "stop", raw: "stop" } });
433
431
  });
434
432
 
435
433
  it("should filter out null events", async () => {
@@ -445,8 +443,11 @@ describe("convertStream", () => {
445
443
  } as any, // This should be filtered out (returns null from default case)
446
444
  {
447
445
  type: "finish",
448
- finishReason: "stop",
449
- usage: { inputTokens: 5, outputTokens: 10, totalTokens: 15 },
446
+ finishReason: { unified: "stop", raw: "stop" },
447
+ usage: {
448
+ inputTokens: { total: 5, noCache: 5, cacheRead: undefined, cacheWrite: undefined },
449
+ outputTokens: { total: 10, text: 10, reasoning: undefined },
450
+ },
450
451
  providerMetadata: undefined,
451
452
  },
452
453
  ];
@@ -1,7 +1,7 @@
1
1
  import { describe, it, expect } from "vitest";
2
2
  import type {
3
3
  LanguageModelV3FunctionTool,
4
- LanguageModelV3ProviderDefinedTool,
4
+ LanguageModelV3ProviderTool,
5
5
  LanguageModelV3ToolChoice,
6
6
  } from "@ai-sdk/provider";
7
7
 
@@ -96,8 +96,8 @@ describe("TOOL codec", () => {
96
96
  });
97
97
  });
98
98
 
99
- describe("encode - provider-defined tools", () => {
100
- it("should encode provider-defined tool", () => {
99
+ describe("encode - provider tools", () => {
100
+ it("should encode provider tool", () => {
101
101
  const result = TOOL.encode({
102
102
  kind: "provider-defined",
103
103
  id: "mcp.tool-123",
@@ -106,14 +106,14 @@ describe("TOOL codec", () => {
106
106
  });
107
107
 
108
108
  expect(result).toEqual({
109
- type: "provider-defined",
109
+ type: "provider",
110
110
  id: "mcp.tool-123",
111
111
  name: "custom_mcp_tool",
112
112
  args: { param1: "value1" },
113
113
  });
114
114
  });
115
115
 
116
- it("should encode provider-defined tool without args", () => {
116
+ it("should encode provider tool without args", () => {
117
117
  const result = TOOL.encode({
118
118
  kind: "provider-defined",
119
119
  id: "mcp.tool-id",
@@ -122,7 +122,7 @@ describe("TOOL codec", () => {
122
122
  });
123
123
 
124
124
  expect(result).toEqual({
125
- type: "provider-defined",
125
+ type: "provider",
126
126
  id: "mcp.tool-id",
127
127
  name: "tool_name",
128
128
  args: {},
@@ -274,11 +274,10 @@ describe("STREAM_UI_PART codec", () => {
274
274
  it("should encode finish event", () => {
275
275
  const event: LanguageModelStreamEvent = {
276
276
  kind: "finish",
277
- finishReason: "stop",
277
+ finishReason: { unified: "stop", raw: "stop" },
278
278
  usage: {
279
- inputTokens: 100,
280
- outputTokens: 50,
281
- totalTokens: 150,
279
+ inputTokens: { total: 100, noCache: 100, cacheRead: undefined, cacheWrite: undefined },
280
+ outputTokens: { total: 50, text: 50, reasoning: undefined },
282
281
  },
283
282
  };
284
283
 
@@ -421,7 +420,14 @@ describe("toUIMessageStream", () => {
421
420
  { kind: "text.delta", id: "text-1", text: "Hello" },
422
421
  { kind: "text.delta", id: "text-1", text: " world" },
423
422
  { kind: "text.end", id: "text-1" },
424
- { kind: "finish", finishReason: "stop", usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 } },
423
+ {
424
+ kind: "finish",
425
+ finishReason: { unified: "stop", raw: "stop" },
426
+ usage: {
427
+ inputTokens: { total: 10, noCache: 10, cacheRead: undefined, cacheWrite: undefined },
428
+ outputTokens: { total: 5, text: 5, reasoning: undefined },
429
+ },
430
+ },
425
431
  ];
426
432
 
427
433
  async function* generateEvents() {
@@ -8,7 +8,7 @@ import {
8
8
  type LanguageModelResponseType,
9
9
  type LanguageModelFinishReason,
10
10
  type LanguageModelUsage,
11
- type LanguageModelWarning,
11
+ type SharedWarning,
12
12
  type SharedProviderMetadata,
13
13
  } from "@kernl-sdk/protocol";
14
14
  import { randomID } from "@kernl-sdk/shared/lib";
@@ -16,7 +16,7 @@ import type {
16
16
  LanguageModelV3Content,
17
17
  LanguageModelV3FinishReason,
18
18
  LanguageModelV3Usage,
19
- LanguageModelV3CallWarning,
19
+ SharedV3Warning,
20
20
  JSONSchema7,
21
21
  } from "@ai-sdk/provider";
22
22
 
@@ -28,7 +28,7 @@ export interface AISdkGenerateResult {
28
28
  finishReason: LanguageModelV3FinishReason;
29
29
  usage: LanguageModelV3Usage;
30
30
  providerMetadata?: Record<string, unknown>;
31
- warnings: Array<LanguageModelV3CallWarning>;
31
+ warnings: Array<SharedV3Warning>;
32
32
  }
33
33
 
34
34
  export const MODEL_RESPONSE: Codec<LanguageModelResponse, AISdkGenerateResult> =
@@ -147,33 +147,12 @@ const USAGE: Codec<LanguageModelUsage, LanguageModelV3Usage> = {
147
147
  decode: (usage) => usage as LanguageModelUsage,
148
148
  };
149
149
 
150
- export const WARNING: Codec<LanguageModelWarning, LanguageModelV3CallWarning> =
151
- {
152
- encode: () => {
153
- throw new Error("codec:unimplemented");
154
- },
155
-
156
- decode: (warning: LanguageModelV3CallWarning) => {
157
- switch (warning.type) {
158
- case "unsupported-setting":
159
- return {
160
- type: "unsupported-setting",
161
- setting: warning.setting as any,
162
- details: warning.details,
163
- };
164
- case "other":
165
- return {
166
- type: "other",
167
- message: warning.message,
168
- };
169
- default:
170
- return {
171
- type: "other",
172
- message: "Unknown warning type",
173
- };
174
- }
175
- },
176
- };
150
+ export const WARNING: Codec<SharedWarning, SharedV3Warning> = {
151
+ encode: () => {
152
+ throw new Error("codec:unimplemented");
153
+ },
154
+ decode: (warning) => warning as SharedWarning,
155
+ };
177
156
 
178
157
  /**
179
158
  * AI SDK response format type.
@@ -147,13 +147,7 @@ export const STREAM_PART: Codec<
147
147
  return {
148
148
  kind: "finish",
149
149
  finishReason: part.finishReason as any, // Types should match
150
- usage: {
151
- inputTokens: part.usage.inputTokens,
152
- outputTokens: part.usage.outputTokens,
153
- totalTokens: part.usage.totalTokens,
154
- reasoningTokens: part.usage.reasoningTokens,
155
- cachedInputTokens: part.usage.cachedInputTokens,
156
- },
150
+ usage: part.usage,
157
151
  providerMetadata: part.providerMetadata,
158
152
  };
159
153
 
@@ -5,13 +5,13 @@ import type {
5
5
  } from "@kernl-sdk/protocol";
6
6
  import type {
7
7
  LanguageModelV3FunctionTool,
8
- LanguageModelV3ProviderDefinedTool,
8
+ LanguageModelV3ProviderTool,
9
9
  LanguageModelV3ToolChoice,
10
10
  } from "@ai-sdk/provider";
11
11
 
12
12
  export const TOOL: Codec<
13
13
  LanguageModelTool,
14
- LanguageModelV3FunctionTool | LanguageModelV3ProviderDefinedTool
14
+ LanguageModelV3FunctionTool | LanguageModelV3ProviderTool
15
15
  > = {
16
16
  encode: (tool) => {
17
17
  if (tool.kind === "function") {
@@ -23,13 +23,13 @@ export const TOOL: Codec<
23
23
  providerOptions: tool.providerOptions,
24
24
  } satisfies LanguageModelV3FunctionTool;
25
25
  } else {
26
- // provider-defined
26
+ // provider tool
27
27
  return {
28
- type: "provider-defined",
28
+ type: "provider",
29
29
  id: tool.id,
30
30
  name: tool.name,
31
31
  args: tool.args,
32
- } satisfies LanguageModelV3ProviderDefinedTool;
32
+ } satisfies LanguageModelV3ProviderTool;
33
33
  }
34
34
  },
35
35
  decode: () => {
@@ -10,16 +10,14 @@ import { EMBEDDING_SETTINGS } from "./convert/embedding";
10
10
  /**
11
11
  * EmbeddingModel adapter for the AI SDK EmbeddingModelV3.
12
12
  */
13
- export class AISDKEmbeddingModel<TValue = string>
14
- implements EmbeddingModel<TValue>
15
- {
13
+ export class AISDKEmbeddingModel implements EmbeddingModel<string> {
16
14
  readonly spec = "1.0" as const;
17
15
  readonly provider: string;
18
16
  readonly modelId: string;
19
17
  readonly maxEmbeddingsPerCall?: number;
20
18
  readonly supportsParallelCalls?: boolean;
21
19
 
22
- constructor(private model: EmbeddingModelV3<TValue>) {
20
+ constructor(private model: EmbeddingModelV3) {
23
21
  this.provider = model.provider;
24
22
  this.modelId = model.modelId;
25
23
 
@@ -33,7 +31,7 @@ export class AISDKEmbeddingModel<TValue = string>
33
31
  }
34
32
 
35
33
  async embed(
36
- request: EmbeddingModelRequest<TValue>,
34
+ request: EmbeddingModelRequest<string>,
37
35
  ): Promise<EmbeddingModelResponse> {
38
36
  const settings = request.settings
39
37
  ? EMBEDDING_SETTINGS.encode(request.settings)
@@ -3,6 +3,7 @@ import type { LanguageModelV3 } from "@ai-sdk/provider";
3
3
  import type {
4
4
  LanguageModel,
5
5
  LanguageModelRequest,
6
+ LanguageModelRequestSettings,
6
7
  LanguageModelResponse,
7
8
  LanguageModelStreamEvent,
8
9
  } from "@kernl-sdk/protocol";
@@ -23,7 +24,14 @@ export class AISDKLanguageModel implements LanguageModel {
23
24
  readonly provider: string;
24
25
  readonly modelId: string;
25
26
 
26
- constructor(private model: LanguageModelV3) {
27
+ /**
28
+ * @param model - The underlying AI SDK model
29
+ * @param settings - Default settings to apply to every request (overridden by per-request settings)
30
+ */
31
+ constructor(
32
+ private model: LanguageModelV3,
33
+ private settings?: Partial<LanguageModelRequestSettings>,
34
+ ) {
27
35
  this.provider = normalizeProvider(model.provider);
28
36
  this.modelId = model.modelId;
29
37
  }
@@ -36,15 +44,16 @@ export class AISDKLanguageModel implements LanguageModel {
36
44
  ): Promise<LanguageModelResponse> {
37
45
  const messages = request.input.map(MESSAGE.encode);
38
46
  const tools = request.tools ? request.tools.map(TOOL.encode) : undefined;
39
- const settings = MODEL_SETTINGS.encode(request.settings);
47
+ const merged = { ...this.settings, ...request.settings };
48
+ const settings = MODEL_SETTINGS.encode(merged);
40
49
  const responseFormat = RESPONSE_FORMAT.encode(request.responseType);
41
50
 
42
51
  const result = await this.model.doGenerate({
43
52
  prompt: messages,
44
53
  tools,
45
- ...settings,
46
54
  responseFormat,
47
55
  abortSignal: request.abort,
56
+ ...settings,
48
57
  });
49
58
 
50
59
  return MODEL_RESPONSE.decode(result);
@@ -58,15 +67,16 @@ export class AISDKLanguageModel implements LanguageModel {
58
67
  ): AsyncIterable<LanguageModelStreamEvent> {
59
68
  const messages = request.input.map(MESSAGE.encode);
60
69
  const tools = request.tools ? request.tools.map(TOOL.encode) : undefined;
61
- const settings = MODEL_SETTINGS.encode(request.settings);
70
+ const merged = { ...this.settings, ...request.settings };
71
+ const settings = MODEL_SETTINGS.encode(merged);
62
72
  const responseFormat = RESPONSE_FORMAT.encode(request.responseType);
63
73
 
64
74
  const stream = await this.model.doStream({
65
75
  prompt: messages,
66
76
  tools,
67
- ...settings,
68
77
  responseFormat,
69
78
  abortSignal: request.abort,
79
+ ...settings,
70
80
  });
71
81
 
72
82
  // text + reasoning buffers for delta accumulation
@@ -0,0 +1,94 @@
1
+ import type { OpenAIOAuthCredentials } from "./types";
2
+
3
+ const TOKEN_URL = "https://auth.openai.com/oauth/token";
4
+ const CLIENT_ID = "app_EMoamEEZ73f0CkXaXp7hrann";
5
+ const CODEX_ENDPOINT = "https://chatgpt.com/backend-api/codex/responses";
6
+
7
+ interface TokenResponse {
8
+ access_token: string;
9
+ refresh_token: string;
10
+ expires_in: number;
11
+ }
12
+
13
+ /**
14
+ * Refresh OpenAI OAuth tokens.
15
+ */
16
+ async function refresh(creds: OpenAIOAuthCredentials): Promise<void> {
17
+ const res = await fetch(TOKEN_URL, {
18
+ method: "POST",
19
+ headers: { "Content-Type": "application/x-www-form-urlencoded" },
20
+ body: new URLSearchParams({
21
+ grant_type: "refresh_token",
22
+ refresh_token: creds.refreshToken,
23
+ client_id: CLIENT_ID,
24
+ }),
25
+ });
26
+
27
+ if (!res.ok) {
28
+ throw new Error(`Token refresh failed: ${res.status}`);
29
+ }
30
+
31
+ const data = (await res.json()) as TokenResponse;
32
+
33
+ creds.accessToken = data.access_token;
34
+ creds.refreshToken = data.refresh_token;
35
+ creds.expiresAt = Date.now() + data.expires_in * 1000;
36
+
37
+ creds.onRefresh?.({
38
+ accessToken: creds.accessToken,
39
+ refreshToken: creds.refreshToken,
40
+ expiresAt: creds.expiresAt,
41
+ });
42
+ }
43
+
44
+ /**
45
+ * Create a fetch wrapper for OpenAI Codex OAuth.
46
+ *
47
+ * Redirects all requests to the Codex endpoint and adds OAuth headers.
48
+ */
49
+ export function createOAuthFetch(creds: OpenAIOAuthCredentials) {
50
+ return async (
51
+ input: string | URL | Request,
52
+ init?: RequestInit,
53
+ ): Promise<Response> => {
54
+ // Refresh if expired (with 30s buffer)
55
+ if (Date.now() >= creds.expiresAt - 30_000) {
56
+ await refresh(creds);
57
+ }
58
+
59
+ const headers = new Headers(init?.headers);
60
+ headers.set("Authorization", `Bearer ${creds.accessToken}`);
61
+
62
+ if (creds.accountId) {
63
+ headers.set("ChatGPT-Account-Id", creds.accountId);
64
+ }
65
+
66
+ // Transform request body for Codex API
67
+ // Codex requires "instructions" field instead of developer/system role in input
68
+ let body = init?.body;
69
+ if (body && typeof body === "string") {
70
+ try {
71
+ const parsed = JSON.parse(body);
72
+
73
+ // Extract developer/system message as instructions
74
+ if (parsed.input && Array.isArray(parsed.input)) {
75
+ const devIdx = parsed.input.findIndex(
76
+ (m: Record<string, unknown>) =>
77
+ m.role === "developer" || m.role === "system",
78
+ );
79
+ if (devIdx !== -1) {
80
+ const devMsg = parsed.input[devIdx];
81
+ parsed.instructions = devMsg.content;
82
+ parsed.input.splice(devIdx, 1);
83
+ }
84
+ }
85
+
86
+ body = JSON.stringify(parsed);
87
+ } catch {
88
+ // ignore parse errors
89
+ }
90
+ }
91
+
92
+ return fetch(CODEX_ENDPOINT, { ...init, headers, body });
93
+ };
94
+ }
@@ -0,0 +1,25 @@
1
+ /**
2
+ * Base OAuth credentials.
3
+ */
4
+ export interface OAuthCredentials {
5
+ /** Current access token */
6
+ accessToken: string;
7
+ /** Refresh token for obtaining new access tokens */
8
+ refreshToken: string;
9
+ /** Expiration timestamp in milliseconds */
10
+ expiresAt: number;
11
+ /** Called when tokens are refreshed - use to persist new tokens */
12
+ onRefresh?: (tokens: {
13
+ accessToken: string;
14
+ refreshToken: string;
15
+ expiresAt: number;
16
+ }) => void;
17
+ }
18
+
19
+ /**
20
+ * OpenAI OAuth credentials (ChatGPT Plus/Pro via Codex).
21
+ */
22
+ export interface OpenAIOAuthCredentials extends OAuthCredentials {
23
+ /** Account ID for org/team subscriptions */
24
+ accountId?: string;
25
+ }
@@ -1,29 +1,49 @@
1
- import { anthropic as _anthropic } from "@ai-sdk/anthropic";
1
+ import {
2
+ anthropic as _anthropic,
3
+ createAnthropic as _createAnthropic,
4
+ } from "@ai-sdk/anthropic";
2
5
  import { AISDKLanguageModel } from "../language-model";
3
6
 
4
7
  /**
5
- * Anthropic model IDs.
8
+ * Anthropic model IDs (derived from @ai-sdk/anthropic).
6
9
  */
7
- export type AnthropicModelId =
8
- | "claude-haiku-4-5"
9
- | "claude-haiku-4-5-20251001"
10
- | "claude-sonnet-4-5"
11
- | "claude-sonnet-4-5-20250929"
12
- | "claude-opus-4-1"
13
- | "claude-opus-4-0"
14
- | "claude-sonnet-4-0"
15
- | "claude-opus-4-1-20250805"
16
- | "claude-opus-4-20250514"
17
- | "claude-sonnet-4-20250514"
18
- | "claude-3-7-sonnet-latest"
19
- | "claude-3-7-sonnet-20250219"
20
- | "claude-3-5-haiku-latest"
21
- | "claude-3-5-haiku-20241022"
22
- | "claude-3-haiku-20240307"
23
- | (string & {});
10
+ export type AnthropicModelId = Parameters<typeof _anthropic>[0];
11
+
12
+ /**
13
+ * Options for creating a custom Anthropic provider.
14
+ */
15
+ export interface AnthropicProviderOptions {
16
+ /** API key for authentication */
17
+ apiKey?: string;
18
+ /** Custom base URL */
19
+ baseURL?: string;
20
+ /** Custom headers */
21
+ headers?: Record<string, string>;
22
+ }
23
+
24
+ /**
25
+ * Create a custom Anthropic provider with explicit credentials.
26
+ *
27
+ * @example
28
+ * ```ts
29
+ * const anthropic = createAnthropic({ apiKey: "sk-..." });
30
+ * const model = anthropic("claude-sonnet-4-5");
31
+ * ```
32
+ */
33
+ export function createAnthropic(options: AnthropicProviderOptions = {}) {
34
+ const provider = _createAnthropic({
35
+ apiKey: options.apiKey,
36
+ baseURL: options.baseURL,
37
+ headers: options.headers,
38
+ });
39
+
40
+ return (modelId: AnthropicModelId) =>
41
+ new AISDKLanguageModel(provider(modelId));
42
+ }
24
43
 
25
44
  /**
26
45
  * Create a kernl-compatible Anthropic language model.
46
+ * Uses ANTHROPIC_API_KEY environment variable.
27
47
  *
28
48
  * @example
29
49
  * ```ts
@@ -1,45 +1,12 @@
1
1
  import { google as _google } from "@ai-sdk/google";
2
+ import { registerEmbeddingProvider } from "@kernl-sdk/retrieval";
2
3
  import { AISDKLanguageModel } from "../language-model";
3
4
  import { AISDKEmbeddingModel } from "../embedding-model";
4
- import { registerEmbeddingProvider } from "@kernl-sdk/retrieval";
5
5
 
6
6
  /**
7
- * Google Generative AI model IDs.
7
+ * Google model IDs (derived from @ai-sdk/google).
8
8
  */
9
- type GoogleGenerativeAIModelId =
10
- | "gemini-1.5-flash"
11
- | "gemini-1.5-flash-latest"
12
- | "gemini-1.5-flash-001"
13
- | "gemini-1.5-flash-002"
14
- | "gemini-1.5-flash-8b"
15
- | "gemini-1.5-flash-8b-latest"
16
- | "gemini-1.5-flash-8b-001"
17
- | "gemini-1.5-pro"
18
- | "gemini-1.5-pro-latest"
19
- | "gemini-1.5-pro-001"
20
- | "gemini-1.5-pro-002"
21
- | "gemini-2.0-flash"
22
- | "gemini-2.0-flash-001"
23
- | "gemini-2.0-flash-live-001"
24
- | "gemini-2.0-flash-lite"
25
- | "gemini-2.0-pro-exp-02-05"
26
- | "gemini-2.0-flash-thinking-exp-01-21"
27
- | "gemini-2.0-flash-exp"
28
- | "gemini-2.5-pro"
29
- | "gemini-2.5-flash"
30
- | "gemini-2.5-flash-image-preview"
31
- | "gemini-2.5-flash-lite"
32
- | "gemini-2.5-flash-lite-preview-09-2025"
33
- | "gemini-2.5-flash-preview-04-17"
34
- | "gemini-2.5-flash-preview-09-2025"
35
- | "gemini-pro-latest"
36
- | "gemini-flash-latest"
37
- | "gemini-flash-lite-latest"
38
- | "gemini-2.5-pro-exp-03-25"
39
- | "gemini-exp-1206"
40
- | "gemma-3-12b-it"
41
- | "gemma-3-27b-it"
42
- | (string & {});
9
+ export type GoogleModelId = Parameters<typeof _google>[0];
43
10
 
44
11
  /**
45
12
  * Create a kernl-compatible Google Generative AI language model.
@@ -52,7 +19,7 @@ type GoogleGenerativeAIModelId =
52
19
  * const response = await gemini.generate([...], {});
53
20
  * ```
54
21
  */
55
- export function google(modelId: GoogleGenerativeAIModelId) {
22
+ export function google(modelId: GoogleModelId) {
56
23
  const model = _google(modelId);
57
24
  return new AISDKLanguageModel(model);
58
25
  }