@kernl-sdk/ai 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. package/.turbo/turbo-build.log +4 -0
  2. package/CHANGELOG.md +10 -0
  3. package/LICENSE +201 -0
  4. package/dist/__tests__/integration.test.d.ts +2 -0
  5. package/dist/__tests__/integration.test.d.ts.map +1 -0
  6. package/dist/__tests__/integration.test.js +388 -0
  7. package/dist/convert/__tests__/message.test.d.ts +2 -0
  8. package/dist/convert/__tests__/message.test.d.ts.map +1 -0
  9. package/dist/convert/__tests__/message.test.js +300 -0
  10. package/dist/convert/__tests__/response.test.d.ts +2 -0
  11. package/dist/convert/__tests__/response.test.d.ts.map +1 -0
  12. package/dist/convert/__tests__/response.test.js +49 -0
  13. package/dist/convert/__tests__/settings.test.d.ts +2 -0
  14. package/dist/convert/__tests__/settings.test.d.ts.map +1 -0
  15. package/dist/convert/__tests__/settings.test.js +144 -0
  16. package/dist/convert/__tests__/stream.test.d.ts +2 -0
  17. package/dist/convert/__tests__/stream.test.d.ts.map +1 -0
  18. package/dist/convert/__tests__/stream.test.js +389 -0
  19. package/dist/convert/__tests__/tools.test.d.ts +2 -0
  20. package/dist/convert/__tests__/tools.test.d.ts.map +1 -0
  21. package/dist/convert/__tests__/tools.test.js +152 -0
  22. package/dist/convert/message.d.ts +4 -0
  23. package/dist/convert/message.d.ts.map +1 -0
  24. package/dist/convert/message.js +122 -0
  25. package/dist/convert/messages.d.ts +4 -0
  26. package/dist/convert/messages.d.ts.map +1 -0
  27. package/dist/convert/messages.js +130 -0
  28. package/dist/convert/response.d.ts +15 -0
  29. package/dist/convert/response.d.ts.map +1 -0
  30. package/dist/convert/response.js +105 -0
  31. package/dist/convert/settings.d.ts +16 -0
  32. package/dist/convert/settings.d.ts.map +1 -0
  33. package/dist/convert/settings.js +36 -0
  34. package/dist/convert/stream.d.ts +11 -0
  35. package/dist/convert/stream.d.ts.map +1 -0
  36. package/dist/convert/stream.js +154 -0
  37. package/dist/convert/tools.d.ts +5 -0
  38. package/dist/convert/tools.d.ts.map +1 -0
  39. package/dist/convert/tools.js +42 -0
  40. package/dist/error.d.ts +8 -0
  41. package/dist/error.d.ts.map +1 -0
  42. package/dist/error.js +15 -0
  43. package/dist/index.d.ts +20 -0
  44. package/dist/index.d.ts.map +1 -0
  45. package/dist/index.js +20 -0
  46. package/dist/language-model.d.ts +21 -0
  47. package/dist/language-model.d.ts.map +1 -0
  48. package/dist/language-model.js +60 -0
  49. package/dist/providers/anthropic.d.ts +14 -0
  50. package/dist/providers/anthropic.d.ts.map +1 -0
  51. package/dist/providers/anthropic.js +17 -0
  52. package/dist/providers/google.d.ts +14 -0
  53. package/dist/providers/google.d.ts.map +1 -0
  54. package/dist/providers/google.js +17 -0
  55. package/dist/providers/openai.d.ts +14 -0
  56. package/dist/providers/openai.d.ts.map +1 -0
  57. package/dist/providers/openai.js +17 -0
  58. package/dist/types.d.ts +1 -0
  59. package/dist/types.d.ts.map +1 -0
  60. package/dist/types.js +1 -0
  61. package/package.json +79 -0
  62. package/src/__tests__/integration.test.ts +447 -0
  63. package/src/convert/__tests__/message.test.ts +336 -0
  64. package/src/convert/__tests__/response.test.ts +63 -0
  65. package/src/convert/__tests__/settings.test.ts +188 -0
  66. package/src/convert/__tests__/stream.test.ts +460 -0
  67. package/src/convert/__tests__/tools.test.ts +179 -0
  68. package/src/convert/message.ts +150 -0
  69. package/src/convert/response.ts +144 -0
  70. package/src/convert/settings.ts +62 -0
  71. package/src/convert/stream.ts +181 -0
  72. package/src/convert/tools.ts +59 -0
  73. package/src/error.ts +16 -0
  74. package/src/index.ts +22 -0
  75. package/src/language-model.ts +77 -0
  76. package/src/providers/anthropic.ts +18 -0
  77. package/src/providers/google.ts +18 -0
  78. package/src/providers/openai.ts +18 -0
  79. package/src/types.ts +0 -0
  80. package/tsconfig.json +13 -0
  81. package/vitest.config.ts +14 -0
@@ -0,0 +1,150 @@
1
+ import type { Codec, LanguageModelItem } from "@kernl-sdk/protocol";
2
+ import type {
3
+ LanguageModelV3Message,
4
+ LanguageModelV3TextPart,
5
+ LanguageModelV3FilePart,
6
+ LanguageModelV3ReasoningPart,
7
+ LanguageModelV3ToolCallPart,
8
+ } from "@ai-sdk/provider";
9
+
10
+ export const MESSAGE: Codec<LanguageModelItem, LanguageModelV3Message> = {
11
+ encode: (item) => {
12
+ switch (item.kind) {
13
+ case "message": {
14
+ switch (item.role) {
15
+ case "system": {
16
+ const content = item.content
17
+ .filter((part) => part.kind === "text")
18
+ .map((part) => (part.kind === "text" ? part.text : ""))
19
+ .join("\n");
20
+
21
+ return {
22
+ role: "system",
23
+ content: content,
24
+ providerOptions: item.providerMetadata,
25
+ };
26
+ }
27
+
28
+ case "user": {
29
+ const content: Array<
30
+ LanguageModelV3TextPart | LanguageModelV3FilePart
31
+ > = [];
32
+
33
+ for (const part of item.content) {
34
+ if (part.kind === "text") {
35
+ content.push({
36
+ type: "text",
37
+ text: part.text,
38
+ providerOptions: part.providerMetadata,
39
+ });
40
+ } else if (part.kind === "file") {
41
+ content.push({
42
+ type: "file",
43
+ filename: part.filename,
44
+ data: part.data,
45
+ mediaType: part.mimeType,
46
+ providerOptions: part.providerMetadata,
47
+ });
48
+ }
49
+ }
50
+
51
+ return {
52
+ role: "user",
53
+ content,
54
+ providerOptions: item.providerMetadata,
55
+ };
56
+ }
57
+
58
+ case "assistant": {
59
+ const content: Array<
60
+ | LanguageModelV3TextPart
61
+ | LanguageModelV3FilePart
62
+ | LanguageModelV3ReasoningPart
63
+ | LanguageModelV3ToolCallPart
64
+ > = [];
65
+
66
+ for (const part of item.content) {
67
+ if (part.kind === "text") {
68
+ content.push({
69
+ type: "text",
70
+ text: part.text,
71
+ providerOptions: part.providerMetadata,
72
+ });
73
+ } else if (part.kind === "file") {
74
+ content.push({
75
+ type: "file",
76
+ filename: part.filename,
77
+ data: part.data,
78
+ mediaType: part.mimeType,
79
+ providerOptions: part.providerMetadata,
80
+ });
81
+ }
82
+ }
83
+
84
+ return {
85
+ role: "assistant",
86
+ content,
87
+ providerOptions: item.providerMetadata,
88
+ };
89
+ }
90
+ }
91
+ break;
92
+ }
93
+
94
+ case "reasoning": {
95
+ return {
96
+ role: "assistant",
97
+ content: [
98
+ {
99
+ type: "reasoning",
100
+ text: "text" in item ? item.text : "",
101
+ providerOptions: item.providerMetadata,
102
+ },
103
+ ],
104
+ };
105
+ }
106
+
107
+ case "tool-call": {
108
+ return {
109
+ role: "assistant",
110
+ content: [
111
+ {
112
+ type: "tool-call",
113
+ toolCallId: item.callId,
114
+ toolName: item.toolId,
115
+ input: JSON.parse(item.arguments),
116
+ providerOptions: item.providerMetadata,
117
+ },
118
+ ],
119
+ };
120
+ }
121
+
122
+ case "tool-result": {
123
+ return {
124
+ role: "tool",
125
+ content: [
126
+ {
127
+ type: "tool-result",
128
+ toolCallId: item.callId,
129
+ toolName: item.toolId,
130
+ output: {
131
+ type: "json",
132
+ value: item.result,
133
+ },
134
+ providerOptions: item.providerMetadata,
135
+ },
136
+ ],
137
+ };
138
+ }
139
+
140
+ default:
141
+ throw new Error(
142
+ `Unsupported LanguageModelItem kind: ${(item as any).kind}`,
143
+ );
144
+ }
145
+ },
146
+
147
+ decode: () => {
148
+ throw new Error("codec:unimplemented");
149
+ },
150
+ };
@@ -0,0 +1,144 @@
1
+ import type {
2
+ Codec,
3
+ LanguageModelResponse,
4
+ LanguageModelResponseItem,
5
+ LanguageModelFinishReason,
6
+ LanguageModelUsage,
7
+ LanguageModelWarning,
8
+ SharedProviderMetadata,
9
+ } from "@kernl-sdk/protocol";
10
+ import { randomID } from "@kernl-sdk/shared/lib";
11
+ import type {
12
+ LanguageModelV3Content,
13
+ LanguageModelV3FinishReason,
14
+ LanguageModelV3Usage,
15
+ LanguageModelV3CallWarning,
16
+ } from "@ai-sdk/provider";
17
+
18
+ /**
19
+ * AI SDK generate result structure
20
+ */
21
+ export interface AISdkGenerateResult {
22
+ content: Array<LanguageModelV3Content>;
23
+ finishReason: LanguageModelV3FinishReason;
24
+ usage: LanguageModelV3Usage;
25
+ providerMetadata?: Record<string, unknown>;
26
+ warnings: Array<LanguageModelV3CallWarning>;
27
+ }
28
+
29
+ export const MODEL_RESPONSE: Codec<LanguageModelResponse, AISdkGenerateResult> =
30
+ {
31
+ encode: () => {
32
+ throw new Error("codec:unimplemented");
33
+ },
34
+
35
+ decode: (result: AISdkGenerateResult) => {
36
+ const content: LanguageModelResponseItem[] = [];
37
+
38
+ for (const item of result.content) {
39
+ if (item.type === "text") {
40
+ content.push({
41
+ kind: "message",
42
+ role: "assistant",
43
+ id: randomID(),
44
+ content: [
45
+ {
46
+ kind: "text",
47
+ text: item.text,
48
+ providerMetadata: item.providerMetadata,
49
+ },
50
+ ],
51
+ providerMetadata: item.providerMetadata,
52
+ });
53
+ } else if (item.type === "reasoning") {
54
+ content.push({
55
+ kind: "reasoning",
56
+ text: item.text,
57
+ providerMetadata: item.providerMetadata,
58
+ });
59
+ } else if (item.type === "tool-call") {
60
+ content.push({
61
+ kind: "tool-call",
62
+ callId: item.toolCallId,
63
+ toolId: item.toolName,
64
+ state: "completed",
65
+ arguments: JSON.stringify(item.input),
66
+ providerMetadata: item.providerMetadata,
67
+ });
68
+ } else if (item.type === "file") {
69
+ content.push({
70
+ kind: "message",
71
+ role: "assistant",
72
+ id: randomID(),
73
+ content: [
74
+ {
75
+ kind: "file",
76
+ mimeType: item.mediaType,
77
+ data: item.data,
78
+ },
79
+ ],
80
+ });
81
+ }
82
+ // TODO: Handle other content types (source, tool-result)
83
+ }
84
+
85
+ const finishReason = FINISH_REASON.decode(result.finishReason);
86
+ const usage = USAGE.decode(result.usage);
87
+ const warnings = result.warnings.map(WARNING.decode);
88
+
89
+ return {
90
+ content,
91
+ finishReason,
92
+ usage,
93
+ warnings,
94
+ providerMetadata: result.providerMetadata as
95
+ | SharedProviderMetadata
96
+ | undefined,
97
+ };
98
+ },
99
+ };
100
+
101
+ const FINISH_REASON: Codec<
102
+ LanguageModelFinishReason,
103
+ LanguageModelV3FinishReason
104
+ > = {
105
+ encode: () => {
106
+ throw new Error("codec:unimplemented");
107
+ },
108
+ decode: (reason) => reason as LanguageModelFinishReason,
109
+ };
110
+
111
+ const USAGE: Codec<LanguageModelUsage, LanguageModelV3Usage> = {
112
+ encode: () => {
113
+ throw new Error("codec:unimplemented");
114
+ },
115
+ decode: (usage) => usage as LanguageModelUsage,
116
+ };
117
+
118
+ export const WARNING: Codec<LanguageModelWarning, LanguageModelV3CallWarning> =
119
+ {
120
+ encode: () => {
121
+ throw new Error("codec:unimplemented");
122
+ },
123
+
124
+ decode: (warning: LanguageModelV3CallWarning) => {
125
+ switch (warning.type) {
126
+ case "unsupported-setting":
127
+ return {
128
+ type: "unsupported-setting",
129
+ setting: warning.setting as any,
130
+ details: warning.details,
131
+ };
132
+ case "other":
133
+ return {
134
+ type: "other",
135
+ message: warning.message,
136
+ };
137
+ default:
138
+ return {
139
+ type: "other",
140
+ message: "Unknown warning type",
141
+ };
142
+ }
143
+ },
144
+ };
@@ -0,0 +1,62 @@
1
+ import type { Codec, LanguageModelRequestSettings } from "@kernl-sdk/protocol";
2
+ import type {
3
+ LanguageModelV3ToolChoice,
4
+ SharedV3ProviderOptions,
5
+ } from "@ai-sdk/provider";
6
+
7
+ import { TOOL_CHOICE } from "./tools";
8
+
9
/**
 * Partial AI SDK call options extracted from settings.
 *
 * The subset of AI SDK `doGenerate`/`doStream` call options that
 * `MODEL_SETTINGS.encode` can populate from kernl request settings.
 * Keys are only present when the corresponding setting was explicitly set.
 */
export interface AISdkCallOptions {
  temperature?: number;
  topP?: number;
  // kernl `maxTokens` maps onto this field.
  maxOutputTokens?: number;
  frequencyPenalty?: number;
  presencePenalty?: number;
  toolChoice?: LanguageModelV3ToolChoice;
  providerOptions?: SharedV3ProviderOptions;
}
21
+
22
+ export const MODEL_SETTINGS: Codec<
23
+ LanguageModelRequestSettings,
24
+ AISdkCallOptions
25
+ > = {
26
+ encode: (settings: LanguageModelRequestSettings) => {
27
+ const options: AISdkCallOptions = {};
28
+
29
+ if (settings.temperature !== undefined) {
30
+ options.temperature = settings.temperature;
31
+ }
32
+ if (settings.topP !== undefined) {
33
+ options.topP = settings.topP;
34
+ }
35
+ if (settings.maxTokens !== undefined) {
36
+ options.maxOutputTokens = settings.maxTokens;
37
+ }
38
+ if (settings.frequencyPenalty !== undefined) {
39
+ options.frequencyPenalty = settings.frequencyPenalty;
40
+ }
41
+ if (settings.presencePenalty !== undefined) {
42
+ options.presencePenalty = settings.presencePenalty;
43
+ }
44
+ if (settings.toolChoice !== undefined) {
45
+ options.toolChoice = TOOL_CHOICE.encode(settings.toolChoice);
46
+ }
47
+ if (settings.providerOptions !== undefined) {
48
+ options.providerOptions =
49
+ settings.providerOptions as SharedV3ProviderOptions;
50
+ }
51
+
52
+ // TODO: Handle reasoning settings (settings.reasoning)
53
+ // TODO: Handle text settings (settings.text)
54
+ // TODO: Handle parallelToolCalls (not in AI SDK v3 base interface)
55
+ // These may need to be mapped to provider-specific options
56
+
57
+ return options;
58
+ },
59
+ decode: () => {
60
+ throw new Error("codec:unimplemented");
61
+ },
62
+ };
@@ -0,0 +1,181 @@
1
+ import type { Codec, LanguageModelStreamEvent } from "@kernl-sdk/protocol";
2
+ import type { LanguageModelV3StreamPart } from "@ai-sdk/provider";
3
+ import { WARNING } from "./response";
4
+
5
+ /**
6
+ * Convert AI SDK stream to async iterable of kernl stream events.
7
+ */
8
+ export async function* convertStream(
9
+ stream: ReadableStream<LanguageModelV3StreamPart>,
10
+ ): AsyncIterable<LanguageModelStreamEvent> {
11
+ const reader = stream.getReader();
12
+
13
+ try {
14
+ while (true) {
15
+ const { done, value } = await reader.read();
16
+ if (done) break;
17
+
18
+ const event = STREAM_PART.decode(value);
19
+ if (event) {
20
+ yield event;
21
+ }
22
+ }
23
+ } finally {
24
+ reader.releaseLock();
25
+ }
26
+ }
27
+
28
/**
 * Codec for converting individual AI SDK stream parts into kernl stream
 * events.
 *
 * `decode` returns `null` for parts that have no kernl equivalent (e.g.
 * `response-metadata` and unknown future part types); callers must skip
 * those. `encode` is intentionally unimplemented — events only flow
 * AI SDK -> kernl.
 */
export const STREAM_PART: Codec<
  LanguageModelStreamEvent | null,
  LanguageModelV3StreamPart
> = {
  encode: () => {
    throw new Error("codec:unimplemented");
  },

  decode: (part) => {
    switch (part.type) {
      // --- text block lifecycle -------------------------------------------
      case "text-start":
        return {
          kind: "text-start",
          id: part.id,
          providerMetadata: part.providerMetadata,
        };

      case "text-delta":
        return {
          kind: "text-delta",
          id: part.id,
          // AI SDK calls the increment `delta`; kernl calls it `text`.
          text: part.delta,
          providerMetadata: part.providerMetadata,
        };

      case "text-end":
        return {
          kind: "text-end",
          id: part.id,
          providerMetadata: part.providerMetadata,
        };

      // --- reasoning block lifecycle --------------------------------------
      case "reasoning-start":
        return {
          kind: "reasoning-start",
          id: part.id,
          providerMetadata: part.providerMetadata,
        };

      case "reasoning-delta":
        return {
          kind: "reasoning-delta",
          id: part.id,
          text: part.delta,
          providerMetadata: part.providerMetadata,
        };

      case "reasoning-end":
        return {
          kind: "reasoning-end",
          id: part.id,
          providerMetadata: part.providerMetadata,
        };

      // --- incremental tool-input streaming -------------------------------
      case "tool-input-start":
        return {
          kind: "tool-input-start",
          id: part.id,
          toolName: part.toolName,
          title: part.title,
          providerMetadata: part.providerMetadata,
        };

      case "tool-input-delta":
        return {
          kind: "tool-input-delta",
          id: part.id,
          delta: part.delta,
          providerMetadata: part.providerMetadata,
        };

      case "tool-input-end":
        return {
          kind: "tool-input-end",
          id: part.id,
          providerMetadata: part.providerMetadata,
        };

      // Complete tool call; `input` is forwarded as-is into `arguments`
      // (presumably the raw JSON string — confirm against the AI SDK types).
      case "tool-call":
        return {
          kind: "tool-call",
          id: part.toolCallId,
          toolName: part.toolName,
          arguments: part.input,
          providerMetadata: part.providerMetadata,
        };

      case "tool-result":
        // Provider-defined tools can stream tool results.
        return {
          kind: "tool-result",
          callId: part.toolCallId,
          toolId: part.toolName,
          state: part.isError ? "failed" : "completed",
          result: part.result,
          // On error, the stringified result doubles as the error text.
          error: part.isError ? String(part.result) : null,
          providerMetadata: part.providerMetadata,
        };

      case "file":
      case "source":
        // These don't have direct kernl equivalents in streaming; the whole
        // part is passed through as a raw event.
        return {
          kind: "raw",
          rawValue: part,
        };

      case "stream-start":
        return {
          kind: "stream-start",
          warnings: part.warnings.map(WARNING.decode),
        };

      case "finish":
        return {
          kind: "finish",
          finishReason: part.finishReason as any, // same string vocabulary on both sides
          usage: {
            inputTokens: part.usage.inputTokens,
            outputTokens: part.usage.outputTokens,
            totalTokens: part.usage.totalTokens,
            reasoningTokens: part.usage.reasoningTokens,
            cachedInputTokens: part.usage.cachedInputTokens,
          },
          providerMetadata: part.providerMetadata,
        };

      case "error":
        return {
          kind: "error",
          error: part.error,
        };

      case "raw":
        return {
          kind: "raw",
          rawValue: part.rawValue,
        };

      case "response-metadata":
        // Kernl doesn't have a specific event for response metadata;
        // it is dropped (could be passed through as raw if ever needed).
        return null;

      default:
        // Unknown/future event type — skip rather than break the stream.
        return null;
    }
  },
};
@@ -0,0 +1,59 @@
1
+ import type {
2
+ Codec,
3
+ LanguageModelTool,
4
+ LanguageModelToolChoice,
5
+ } from "@kernl-sdk/protocol";
6
+ import type {
7
+ LanguageModelV3FunctionTool,
8
+ LanguageModelV3ProviderDefinedTool,
9
+ LanguageModelV3ToolChoice,
10
+ } from "@ai-sdk/provider";
11
+
12
+ export const TOOL: Codec<
13
+ LanguageModelTool,
14
+ LanguageModelV3FunctionTool | LanguageModelV3ProviderDefinedTool
15
+ > = {
16
+ encode: (tool) => {
17
+ if (tool.kind === "function") {
18
+ return {
19
+ type: "function",
20
+ name: tool.name,
21
+ description: tool.description,
22
+ inputSchema: tool.parameters,
23
+ providerOptions: tool.providerOptions,
24
+ } satisfies LanguageModelV3FunctionTool;
25
+ } else {
26
+ // provider-defined
27
+ return {
28
+ type: "provider-defined",
29
+ id: tool.id,
30
+ name: tool.name,
31
+ args: tool.args,
32
+ } satisfies LanguageModelV3ProviderDefinedTool;
33
+ }
34
+ },
35
+ decode: () => {
36
+ throw new Error("codec:unimplemented");
37
+ },
38
+ };
39
+
40
+ export const TOOL_CHOICE: Codec<
41
+ LanguageModelToolChoice,
42
+ LanguageModelV3ToolChoice
43
+ > = {
44
+ encode: (choice) => {
45
+ switch (choice.kind) {
46
+ case "auto":
47
+ return { type: "auto" };
48
+ case "none":
49
+ return { type: "none" };
50
+ case "required":
51
+ return { type: "required" };
52
+ case "tool":
53
+ return { type: "tool", toolName: choice.toolId };
54
+ }
55
+ },
56
+ decode: () => {
57
+ throw new Error("codec:unimplemented");
58
+ },
59
+ };
package/src/error.ts ADDED
@@ -0,0 +1,16 @@
1
+ /**
2
+ * Wrap AI SDK errors with additional context.
3
+ *
4
+ * @param error - The error from AI SDK
5
+ * @param context - Additional context about where the error occurred
6
+ */
7
+ export function wrapError(error: unknown, context: string): Error {
8
+ if (error instanceof Error) {
9
+ const wrapped = new Error(`AI SDK error in ${context}: ${error.message}`);
10
+ wrapped.stack = error.stack;
11
+ wrapped.cause = error;
12
+ return wrapped;
13
+ }
14
+
15
+ return new Error(`AI SDK error in ${context}: ${String(error)}`);
16
+ }
package/src/index.ts ADDED
@@ -0,0 +1,22 @@
1
+ /**
2
+ * @kernl-sdk/ai - AI SDK adapter for Kernl
3
+ *
4
+ * Universal provider support via Vercel AI SDK v5.
5
+ *
6
+ * @example
7
+ * ```ts
8
+ * import { anthropic } from '@kernl-sdk/ai/anthropic';
9
+ *
10
+ * const claude = anthropic('claude-3-5-sonnet-20241022');
11
+ * const response = await claude.generate([...], {});
12
+ * ```
13
+ */
14
+
15
+ export { AISDKLanguageModel } from "./language-model";
16
+
17
+ // Re-export codecs for custom provider implementations
18
+ export { MESSAGE } from "./convert/message";
19
+ export { TOOL, TOOL_CHOICE } from "./convert/tools";
20
+ export { MODEL_SETTINGS } from "./convert/settings";
21
+ export { MODEL_RESPONSE, WARNING } from "./convert/response";
22
+ export { convertStream } from "./convert/stream";