@kernl-sdk/ai 0.1.3 → 0.2.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68) hide show
  1. package/.turbo/turbo-build.log +5 -4
  2. package/.turbo/turbo-check-types.log +4 -0
  3. package/CHANGELOG.md +81 -0
  4. package/LICENSE +1 -1
  5. package/dist/__tests__/integration.test.js +278 -27
  6. package/dist/__tests__/language-model.test.js +3 -2
  7. package/dist/convert/__tests__/message.test.js +28 -3
  8. package/dist/convert/__tests__/response.test.js +1 -1
  9. package/dist/convert/__tests__/settings.test.js +1 -1
  10. package/dist/convert/__tests__/stream.test.js +32 -8
  11. package/dist/convert/__tests__/tools.test.js +1 -1
  12. package/dist/convert/__tests__/ui-message.test.d.ts +2 -0
  13. package/dist/convert/__tests__/ui-message.test.d.ts.map +1 -0
  14. package/dist/convert/__tests__/ui-message.test.js +1836 -0
  15. package/dist/convert/__tests__/ui-stream.test.d.ts +2 -0
  16. package/dist/convert/__tests__/ui-stream.test.d.ts.map +1 -0
  17. package/dist/convert/__tests__/ui-stream.test.js +452 -0
  18. package/dist/convert/message.d.ts +2 -1
  19. package/dist/convert/message.d.ts.map +1 -1
  20. package/dist/convert/message.js +15 -9
  21. package/dist/convert/response.d.ts +2 -1
  22. package/dist/convert/response.d.ts.map +1 -1
  23. package/dist/convert/response.js +66 -46
  24. package/dist/convert/settings.d.ts +2 -1
  25. package/dist/convert/settings.d.ts.map +1 -1
  26. package/dist/convert/settings.js +1 -1
  27. package/dist/convert/stream.d.ts +2 -1
  28. package/dist/convert/stream.d.ts.map +1 -1
  29. package/dist/convert/stream.js +13 -18
  30. package/dist/convert/tools.d.ts +2 -1
  31. package/dist/convert/tools.d.ts.map +1 -1
  32. package/dist/convert/ui-message.d.ts +40 -0
  33. package/dist/convert/ui-message.d.ts.map +1 -0
  34. package/dist/convert/ui-message.js +324 -0
  35. package/dist/convert/ui-stream.d.ts +29 -0
  36. package/dist/convert/ui-stream.d.ts.map +1 -0
  37. package/dist/convert/ui-stream.js +139 -0
  38. package/dist/index.d.ts +8 -6
  39. package/dist/index.d.ts.map +1 -1
  40. package/dist/index.js +8 -6
  41. package/dist/language-model.d.ts.map +1 -1
  42. package/dist/language-model.js +77 -88
  43. package/dist/providers/anthropic.d.ts +1 -1
  44. package/dist/providers/anthropic.js +1 -1
  45. package/dist/providers/google.d.ts +1 -1
  46. package/dist/providers/google.js +1 -1
  47. package/dist/providers/openai.d.ts +1 -1
  48. package/dist/providers/openai.js +1 -1
  49. package/package.json +12 -8
  50. package/src/__tests__/integration.test.ts +789 -507
  51. package/src/__tests__/language-model.test.ts +2 -1
  52. package/src/convert/__tests__/message.test.ts +29 -2
  53. package/src/convert/__tests__/stream.test.ts +34 -7
  54. package/src/convert/__tests__/ui-message.test.ts +2008 -0
  55. package/src/convert/__tests__/ui-stream.test.ts +547 -0
  56. package/src/convert/message.ts +17 -12
  57. package/src/convert/response.ts +82 -52
  58. package/src/convert/settings.ts +2 -1
  59. package/src/convert/stream.ts +22 -20
  60. package/src/convert/tools.ts +1 -1
  61. package/src/convert/ui-message.ts +409 -0
  62. package/src/convert/ui-stream.ts +167 -0
  63. package/src/index.ts +2 -0
  64. package/src/language-model.ts +78 -87
  65. package/tsconfig.json +1 -1
  66. package/vitest.config.ts +1 -0
  67. package/src/error.ts +0 -16
  68. package/src/types.ts +0 -0
@@ -1,11 +1,14 @@
1
- import type {
2
- Codec,
3
- LanguageModelResponse,
4
- LanguageModelResponseItem,
5
- LanguageModelFinishReason,
6
- LanguageModelUsage,
7
- LanguageModelWarning,
8
- SharedProviderMetadata,
1
+ import type { Codec } from "@kernl-sdk/shared/lib";
2
+ import {
3
+ IN_PROGRESS,
4
+ COMPLETED,
5
+ FAILED,
6
+ type LanguageModelResponse,
7
+ type LanguageModelResponseItem,
8
+ type LanguageModelFinishReason,
9
+ type LanguageModelUsage,
10
+ type LanguageModelWarning,
11
+ type SharedProviderMetadata,
9
12
  } from "@kernl-sdk/protocol";
10
13
  import { randomID } from "@kernl-sdk/shared/lib";
11
14
  import type {
@@ -32,54 +35,81 @@ export const MODEL_RESPONSE: Codec<LanguageModelResponse, AISdkGenerateResult> =
32
35
  throw new Error("codec:unimplemented");
33
36
  },
34
37
 
35
- decode: (result: AISdkGenerateResult) => {
38
+ decode: (result: AISdkGenerateResult): LanguageModelResponse => {
36
39
  const content: LanguageModelResponseItem[] = [];
37
40
 
38
41
  for (const item of result.content) {
39
- if (item.type === "text") {
40
- content.push({
41
- kind: "message",
42
- role: "assistant",
43
- id: randomID(),
44
- content: [
45
- {
46
- kind: "text",
47
- text: item.text,
48
- providerMetadata: item.providerMetadata,
49
- },
50
- ],
51
- providerMetadata: item.providerMetadata,
52
- });
53
- } else if (item.type === "reasoning") {
54
- content.push({
55
- kind: "reasoning",
56
- text: item.text,
57
- providerMetadata: item.providerMetadata,
58
- });
59
- } else if (item.type === "tool-call") {
60
- content.push({
61
- kind: "tool-call",
62
- callId: item.toolCallId,
63
- toolId: item.toolName,
64
- state: "completed",
65
- arguments: item.input,
66
- providerMetadata: item.providerMetadata,
67
- });
68
- } else if (item.type === "file") {
69
- content.push({
70
- kind: "message",
71
- role: "assistant",
72
- id: randomID(),
73
- content: [
74
- {
75
- kind: "file",
76
- mimeType: item.mediaType,
77
- data: item.data,
78
- },
79
- ],
80
- });
42
+ switch (item.type) {
43
+ case "text":
44
+ content.push({
45
+ kind: "message",
46
+ role: "assistant",
47
+ id: randomID(),
48
+ content: [
49
+ {
50
+ kind: "text",
51
+ text: item.text,
52
+ providerMetadata: item.providerMetadata,
53
+ },
54
+ ],
55
+ providerMetadata: item.providerMetadata,
56
+ });
57
+ break;
58
+
59
+ case "reasoning":
60
+ content.push({
61
+ kind: "reasoning",
62
+ text: item.text,
63
+ providerMetadata: item.providerMetadata,
64
+ });
65
+ break;
66
+
67
+ case "tool-call":
68
+ content.push({
69
+ kind: "tool-call",
70
+ callId: item.toolCallId,
71
+ toolId: item.toolName,
72
+ state: IN_PROGRESS,
73
+ arguments: item.input || "{}",
74
+ providerMetadata: item.providerMetadata,
75
+ });
76
+ break;
77
+
78
+ case "tool-result":
79
+ content.push({
80
+ kind: "tool-result",
81
+ callId: item.toolCallId,
82
+ toolId: item.toolName,
83
+ state: item.isError ? FAILED : COMPLETED,
84
+ result: item.isError ? null : item.result,
85
+ error: item.isError
86
+ ? typeof item.result === "string"
87
+ ? item.result
88
+ : JSON.stringify(item.result)
89
+ : null,
90
+ providerMetadata: item.providerMetadata,
91
+ });
92
+ break;
93
+
94
+ case "file":
95
+ content.push({
96
+ kind: "message",
97
+ role: "assistant",
98
+ id: randomID(),
99
+ content: [
100
+ {
101
+ kind: "file",
102
+ mimeType: item.mediaType,
103
+ data: item.data,
104
+ },
105
+ ],
106
+ });
107
+ break;
108
+
109
+ case "source":
110
+ // Source type is intentionally not handled
111
+ break;
81
112
  }
82
- // TODO: Handle other content types (source, tool-result)
83
113
  }
84
114
 
85
115
  const finishReason = FINISH_REASON.decode(result.finishReason);
@@ -1,4 +1,5 @@
1
- import type { Codec, LanguageModelRequestSettings } from "@kernl-sdk/protocol";
1
+ import type { Codec } from "@kernl-sdk/shared/lib";
2
+ import type { LanguageModelRequestSettings } from "@kernl-sdk/protocol";
2
3
  import type {
3
4
  LanguageModelV3ToolChoice,
4
5
  SharedV3ProviderOptions,
@@ -1,6 +1,12 @@
1
- import type { Codec, LanguageModelStreamEvent } from "@kernl-sdk/protocol";
1
+ import type { Codec } from "@kernl-sdk/shared/lib";
2
+ import {
3
+ type LanguageModelStreamEvent,
4
+ COMPLETED,
5
+ FAILED,
6
+ IN_PROGRESS,
7
+ } from "@kernl-sdk/protocol";
2
8
  import type { LanguageModelV3StreamPart } from "@ai-sdk/provider";
3
- import { COMPLETED, FAILED } from "@kernl-sdk/protocol";
9
+
4
10
  import { WARNING } from "./response";
5
11
 
6
12
  /**
@@ -37,7 +43,9 @@ export const STREAM_PART: Codec<
37
43
  throw new Error("codec:unimplemented");
38
44
  },
39
45
 
40
- decode: (part) => {
46
+ decode: (
47
+ part: LanguageModelV3StreamPart,
48
+ ): LanguageModelStreamEvent | null => {
41
49
  switch (part.type) {
42
50
  case "text-start":
43
51
  return {
@@ -112,32 +120,23 @@ export const STREAM_PART: Codec<
112
120
  kind: "tool-call",
113
121
  callId: part.toolCallId,
114
122
  toolId: part.toolName,
115
- state: COMPLETED,
116
- arguments: part.input,
123
+ state: IN_PROGRESS,
124
+ arguments: part.input || "{}",
117
125
  providerMetadata: part.providerMetadata,
118
126
  };
119
127
 
120
128
  case "tool-result":
121
- // Provider-defined tools can stream tool results
129
+ // provider-defined tools can stream tool results
122
130
  return {
123
131
  kind: "tool-result",
124
132
  callId: part.toolCallId,
125
133
  toolId: part.toolName,
126
134
  state: part.isError ? FAILED : COMPLETED,
127
- result: part.result,
135
+ result: part.isError ? null : part.result,
128
136
  error: part.isError ? String(part.result) : null,
129
137
  providerMetadata: part.providerMetadata,
130
138
  };
131
139
 
132
- case "file":
133
- case "source":
134
- // These don't have direct Kernl equivalents in streaming
135
- // Could be handled as raw events
136
- return {
137
- kind: "raw",
138
- rawValue: part,
139
- };
140
-
141
140
  case "stream-start":
142
141
  return {
143
142
  kind: "stream-start",
@@ -170,13 +169,16 @@ export const STREAM_PART: Codec<
170
169
  rawValue: part.rawValue,
171
170
  };
172
171
 
172
+ // - unknown or no equivalent -
173
173
  case "response-metadata":
174
- // Kernl doesn't have a specific event for response metadata
175
- // Could be passed through as raw or ignored
176
- return null;
174
+ case "file":
175
+ case "source":
176
+ return {
177
+ kind: "raw",
178
+ rawValue: part,
179
+ };
177
180
 
178
181
  default:
179
- // Unknown event type
180
182
  return null;
181
183
  }
182
184
  },
@@ -1,5 +1,5 @@
1
+ import type { Codec } from "@kernl-sdk/shared/lib";
1
2
  import type {
2
- Codec,
3
3
  LanguageModelTool,
4
4
  LanguageModelToolChoice,
5
5
  } from "@kernl-sdk/protocol";
@@ -0,0 +1,409 @@
1
+ import {
2
+ validateUIMessages,
3
+ type UIMessage,
4
+ type UIDataTypes,
5
+ type UITools,
6
+ type ToolUIPart,
7
+ type DynamicToolUIPart,
8
+ type DataUIPart,
9
+ } from "ai";
10
+
11
+ import type { Codec } from "@kernl-sdk/shared/lib";
12
+ import {
13
+ type LanguageModelItem,
14
+ type MessagePart,
15
+ type Reasoning,
16
+ type ToolCall,
17
+ type ToolResult,
18
+ type JSONValue,
19
+ IN_PROGRESS,
20
+ COMPLETED,
21
+ FAILED,
22
+ } from "@kernl-sdk/protocol";
23
+
24
+ /**
25
+ * Converter for transforming Vercel AI SDK UIMessage format (used by useChat hook)
26
+ * to kernl's LanguageModelItem format.
27
+ *
28
+ * @example
29
+ * ```typescript
30
+ * import { UIMessageCodec } from '@kernl-sdk/ai';
31
+ *
32
+ * // Validate and convert incoming UI message to kernl format
33
+ * const items = await UIMessageCodec.decode(uiMessage);
34
+ * ```
35
+ */
36
+ export const UIMessageCodec: AsyncCodec<LanguageModelItem, UIMessage> = {
37
+ /**
38
+ * Convert from kernl LanguageModelItem to AI SDK UIMessage.
39
+ *
40
+ * NOTE: use historyToUIMessages() instead since the AI SDK groups assistant parts together (must process as a group).
41
+ */
42
+ encode: (item: LanguageModelItem): UIMessage => {
43
+ throw new Error("UIMessageCodec.encode: Unimplemented");
44
+ },
45
+
46
+ /**
47
+ * Convert from AI SDK UIMessage to kernl LanguageModelItems.
48
+ *
49
+ * This validates the message structure using AI SDK's built-in validation,
50
+ * then converts it to kernl's internal format.
51
+ *
52
+ * @throws {Error} If validation fails or unsupported message types are encountered
53
+ */
54
+ decode: async <
55
+ METADATA = unknown,
56
+ DATA_PARTS extends UIDataTypes = UIDataTypes,
57
+ TOOLS extends UITools = UITools,
58
+ >(
59
+ message: UIMessage<METADATA, DATA_PARTS, TOOLS>,
60
+ ): Promise<LanguageModelItem[]> => {
61
+ const [m] = await validateUIMessages({ messages: [message] });
62
+
63
+ const items: LanguageModelItem[] = [];
64
+ const mparts: MessagePart[] = [];
65
+
66
+ for (const part of m.parts) {
67
+ // --- tool parts ---
68
+ if (part.type === "dynamic-tool" || part.type.startsWith("tool-")) {
69
+ items.push(
70
+ ...TOOL_UI_PART.decode(part as ToolUIPart | DynamicToolUIPart),
71
+ );
72
+ continue;
73
+ }
74
+
75
+ // --- data parts ---
76
+ if (part.type.startsWith("data-")) {
77
+ const name = part.type.replace(/^data-/, "");
78
+ const p = part as DataUIPart<UIDataTypes>;
79
+ mparts.push({
80
+ kind: "data",
81
+ data: { [name]: p.data },
82
+ });
83
+ continue;
84
+ }
85
+
86
+ switch (part.type) {
87
+ // -- message parts ---
88
+ case "text":
89
+ mparts.push({ ...part, kind: "text" });
90
+ break;
91
+
92
+ case "file": {
93
+ const base64 = part.url.match(/^data:[^;]+;base64,(.+)$/);
94
+ if (base64) {
95
+ // (a) base64 data URL - extract and store as data
96
+ mparts.push({
97
+ kind: "file",
98
+ mimeType: part.mediaType,
99
+ filename: part.filename,
100
+ data: base64[1],
101
+ providerMetadata: part.providerMetadata,
102
+ });
103
+ } else {
104
+ // (b) non-base64 data URL or regular URL - store as uri
105
+ mparts.push({
106
+ kind: "file",
107
+ mimeType: part.mediaType,
108
+ filename: part.filename,
109
+ uri: part.url,
110
+ providerMetadata: part.providerMetadata,
111
+ });
112
+ }
113
+ break;
114
+ }
115
+
116
+ case "reasoning": {
117
+ const r: Reasoning = {
118
+ kind: "reasoning",
119
+ text: part.text,
120
+ providerMetadata: part.providerMetadata,
121
+ };
122
+ items.push(r);
123
+ break;
124
+ }
125
+
126
+ // - skip -
127
+ // case "source-*": - just noting for exhaustiveness
128
+ case "step-start":
129
+ default:
130
+ break;
131
+ }
132
+ }
133
+
134
+ // add the message with all collected message parts (if any)
135
+ if (mparts.length > 0) {
136
+ items.unshift({
137
+ kind: "message",
138
+ id: m.id,
139
+ role: m.role,
140
+ content: mparts,
141
+ metadata: m.metadata as Record<string, unknown> | undefined,
142
+ providerMetadata: undefined, // Message-level providerMetadata not in UIMessage
143
+ });
144
+ }
145
+
146
+ return items;
147
+ },
148
+ };
149
+
150
+ /**
151
+ * Codec for converting AI SDK tool parts to kernl ToolCall/ToolResult items.
152
+ */
153
+ const TOOL_UI_PART: Codec<
154
+ (ToolCall | ToolResult)[],
155
+ ToolUIPart | DynamicToolUIPart
156
+ > = {
157
+ encode: (
158
+ _items: (ToolCall | ToolResult)[],
159
+ ): ToolUIPart | DynamicToolUIPart => {
160
+ throw new Error("TOOL_PART.encode: Not yet implemented");
161
+ },
162
+
163
+ decode: (part: ToolUIPart | DynamicToolUIPart): (ToolCall | ToolResult)[] => {
164
+ const toolId =
165
+ part.type === "dynamic-tool"
166
+ ? part.toolName
167
+ : part.type.replace(/^tool-/, "");
168
+ const callId = part.toolCallId;
169
+
170
+ switch (part.state) {
171
+ case "input-available": {
172
+ const call: ToolCall = {
173
+ kind: "tool-call",
174
+ callId,
175
+ toolId,
176
+ state: IN_PROGRESS,
177
+ arguments: JSON.stringify(part.input ?? {}),
178
+ providerMetadata: part.callProviderMetadata,
179
+ };
180
+ return [call];
181
+ }
182
+
183
+ case "output-available": {
184
+ const result: ToolResult = {
185
+ kind: "tool-result",
186
+ callId,
187
+ toolId,
188
+ state: COMPLETED,
189
+ result: part.output as JSONValue | null, // AI SDK ensures tool outputs are JSON-serializable
190
+ error: null,
191
+ providerMetadata: part.callProviderMetadata,
192
+ };
193
+ return [result];
194
+ }
195
+
196
+ case "output-error": {
197
+ const result: ToolResult = {
198
+ kind: "tool-result",
199
+ callId,
200
+ toolId,
201
+ state: FAILED,
202
+ result: null,
203
+ error: part.errorText,
204
+ providerMetadata: part.callProviderMetadata,
205
+ };
206
+ return [result];
207
+ }
208
+
209
+ // TODO AI SDK v6: Add support for approval-requested, approval-responded, output-denied states
210
+ case "input-streaming":
211
+ default:
212
+ return [];
213
+ }
214
+ },
215
+ };
216
+
217
+ /**
218
+ * Async codec for converting between two formats.
219
+ * Similar to Codec but decode is async and returns an array.
220
+ */
221
+ type AsyncCodec<From, To> = {
222
+ encode: (item: From) => To;
223
+ decode: <
224
+ METADATA = unknown,
225
+ DATA_PARTS extends UIDataTypes = UIDataTypes,
226
+ TOOLS extends UITools = UITools,
227
+ >(
228
+ message: To extends UIMessage ? UIMessage<METADATA, DATA_PARTS, TOOLS> : To,
229
+ ) => Promise<From[]>;
230
+ };
231
+
232
+ /**
233
+ * Convert thread history events to AI SDK UIMessages for useChat hook.
234
+ *
235
+ * Groups tool calls with their results and attaches them to the preceding
236
+ * assistant message as parts.
237
+ *
238
+ * @example
239
+ * ```ts
240
+ * import { historyToUIMessages } from '@kernl-sdk/ai';
241
+ *
242
+ * const thread = await kernl.threads.get("thread_123");
243
+ * const messages = historyToUIMessages(thread.history);
244
+ * ```
245
+ */
246
+ export function historyToUIMessages(items: LanguageModelItem[]): UIMessage[] {
247
+ const messages: UIMessage[] = [];
248
+
249
+ for (let i = 0; i < items.length; i++) {
250
+ const item = items[i];
251
+
252
+ if (item.kind === "message") {
253
+ const parts = [];
254
+
255
+ // add message content parts
256
+ for (const part of item.content) {
257
+ switch (part.kind) {
258
+ case "text":
259
+ parts.push({
260
+ type: "text" as const,
261
+ text: part.text,
262
+ ...(part.providerMetadata && {
263
+ providerMetadata: part.providerMetadata as any,
264
+ }),
265
+ });
266
+ break;
267
+
268
+ case "file":
269
+ parts.push({
270
+ type: "file" as const,
271
+ url: part.uri || `data:${part.mimeType};base64,${part.data}`,
272
+ mediaType: part.mimeType,
273
+ filename: part.filename,
274
+ ...(part.providerMetadata && {
275
+ providerMetadata: part.providerMetadata as any,
276
+ }),
277
+ });
278
+ break;
279
+
280
+ case "data":
281
+ for (const [name, value] of Object.entries(part.data)) {
282
+ parts.push({ type: `data-${name}` as const, data: value });
283
+ }
284
+ break;
285
+ }
286
+ }
287
+
288
+ // look ahead for tool calls/results and reasoning that belong to this message
289
+ if (item.role === "assistant") {
290
+ let j = i + 1;
291
+ const toolMap = new Map<
292
+ string,
293
+ {
294
+ call?: Extract<LanguageModelItem, { kind: "tool-call" }>;
295
+ result?: Extract<LanguageModelItem, { kind: "tool-result" }>;
296
+ }
297
+ >();
298
+ const reasoningParts: Extract<
299
+ LanguageModelItem,
300
+ { kind: "reasoning" }
301
+ >[] = [];
302
+
303
+ while (j < items.length && items[j].kind !== "message") {
304
+ const next = items[j];
305
+
306
+ if (next.kind === "tool-call") {
307
+ const existing = toolMap.get(next.callId) || {};
308
+ toolMap.set(next.callId, { ...existing, call: next });
309
+ } else if (next.kind === "tool-result") {
310
+ const existing = toolMap.get(next.callId) || {};
311
+ toolMap.set(next.callId, { ...existing, result: next });
312
+ } else if (next.kind === "reasoning") {
313
+ reasoningParts.push(next);
314
+ }
315
+
316
+ j++;
317
+ }
318
+
319
+ // add reasoning parts first
320
+ for (const reasoning of reasoningParts) {
321
+ parts.push({
322
+ type: "reasoning" as const,
323
+ text: reasoning.text,
324
+ ...(reasoning.providerMetadata && {
325
+ providerMetadata: reasoning.providerMetadata as any,
326
+ }),
327
+ });
328
+ }
329
+
330
+ // convert tool pairs to UI parts
331
+ for (const [callId, { call, result }] of toolMap.entries()) {
332
+ if (!call) continue; // orphaned result, skip
333
+
334
+ const input = JSON.parse(call.arguments);
335
+
336
+ if (result) {
337
+ if (result.state === "failed") {
338
+ parts.push({
339
+ type: `tool-${call.toolId}` as const,
340
+ toolCallId: callId,
341
+ toolName: call.toolId,
342
+ input,
343
+ state: "output-error" as const,
344
+ errorText: result.error || "",
345
+ ...(call.providerMetadata && {
346
+ callProviderMetadata: call.providerMetadata as any,
347
+ }),
348
+ });
349
+ } else {
350
+ parts.push({
351
+ type: `tool-${call.toolId}` as const,
352
+ toolCallId: callId,
353
+ toolName: call.toolId,
354
+ input,
355
+ state: "output-available" as const,
356
+ output: result.result,
357
+ ...(call.providerMetadata && {
358
+ callProviderMetadata: call.providerMetadata as any,
359
+ }),
360
+ });
361
+ }
362
+ } else {
363
+ parts.push({
364
+ type: `tool-${call.toolId}` as const,
365
+ toolCallId: callId,
366
+ toolName: call.toolId,
367
+ input,
368
+ state: "input-available" as const,
369
+ ...(call.providerMetadata && {
370
+ callProviderMetadata: call.providerMetadata as any,
371
+ }),
372
+ });
373
+ }
374
+ }
375
+
376
+ // skip over the tool and reasoning events we just processed
377
+ i = j - 1;
378
+ }
379
+
380
+ messages.push({
381
+ id: item.id,
382
+ role: item.role,
383
+ parts,
384
+ });
385
+ } else if (item.kind === "reasoning") {
386
+ // add reasoning as a part of the last assistant message, or create new message
387
+ const lastMessage = messages[messages.length - 1];
388
+ const rpart = {
389
+ type: "reasoning" as const,
390
+ text: item.text,
391
+ ...(item.providerMetadata && {
392
+ providerMetadata: item.providerMetadata as any,
393
+ }),
394
+ };
395
+
396
+ if (lastMessage && lastMessage.role === "assistant") {
397
+ lastMessage.parts.push(rpart);
398
+ } else {
399
+ messages.push({
400
+ id: item.id || `reasoning-${i}`,
401
+ role: "assistant",
402
+ parts: [rpart],
403
+ });
404
+ }
405
+ }
406
+ }
407
+
408
+ return messages;
409
+ }