ai 5.0.0-canary.23 → 5.0.0-canary.24

This diff shows the contents of package versions that have been publicly released to a supported registry, as they appear in those registries. It is provided for informational purposes only.
package/dist/index.mjs CHANGED
@@ -467,102 +467,84 @@ import {

  // src/ui-message-stream/ui-message-stream-parts.ts
  import { z } from "zod";
- var toolCallSchema = z.object({
- toolCallId: z.string(),
- toolName: z.string(),
- args: z.unknown()
- });
- var toolResultValueSchema = z.object({
- toolCallId: z.string(),
- result: z.unknown(),
- providerMetadata: z.any().optional()
- });
- var sourceSchema = z.object({
- type: z.literal("source"),
- sourceType: z.literal("url"),
- id: z.string(),
- url: z.string(),
- title: z.string().optional(),
- providerMetadata: z.any().optional()
- // Use z.any() for generic metadata
- });
- var uiMessageStreamPartSchema = z.discriminatedUnion("type", [
+ var uiMessageStreamPartSchema = z.union([
  z.object({
  type: z.literal("text"),
- value: z.string()
+ text: z.string()
  }),
  z.object({
  type: z.literal("error"),
- value: z.string()
+ errorText: z.string()
  }),
  z.object({
- type: z.literal("tool-call"),
- value: toolCallSchema
+ type: z.literal("tool-call-streaming-start"),
+ toolCallId: z.string(),
+ toolName: z.string()
  }),
  z.object({
- type: z.literal("tool-result"),
- value: toolResultValueSchema
+ type: z.literal("tool-call-delta"),
+ toolCallId: z.string(),
+ argsTextDelta: z.string()
  }),
  z.object({
- type: z.literal("tool-call-streaming-start"),
- value: z.object({ toolCallId: z.string(), toolName: z.string() })
+ type: z.literal("tool-call"),
+ toolCallId: z.string(),
+ toolName: z.string(),
+ args: z.unknown()
  }),
  z.object({
- type: z.literal("tool-call-delta"),
- value: z.object({ toolCallId: z.string(), argsTextDelta: z.string() })
+ type: z.literal("tool-result"),
+ toolCallId: z.string(),
+ result: z.unknown(),
+ providerMetadata: z.any().optional()
  }),
  z.object({
  type: z.literal("reasoning"),
- value: z.object({
- text: z.string(),
- providerMetadata: z.record(z.any()).optional()
- })
+ text: z.string(),
+ providerMetadata: z.record(z.any()).optional()
  }),
  z.object({
  type: z.literal("source"),
- value: sourceSchema
+ sourceType: z.literal("url"),
+ id: z.string(),
+ url: z.string(),
+ title: z.string().optional(),
+ providerMetadata: z.any().optional()
+ // Use z.any() for generic metadata
  }),
  z.object({
  type: z.literal("file"),
- value: z.object({
- url: z.string(),
- mediaType: z.string()
- })
+ url: z.string(),
+ mediaType: z.string()
+ }),
+ z.object({
+ type: z.string().startsWith("data-"),
+ id: z.string().optional(),
+ data: z.unknown()
  }),
  z.object({
  type: z.literal("metadata"),
- value: z.object({
- metadata: z.unknown()
- })
+ value: z.object({ metadata: z.unknown() })
  }),
  z.object({
  type: z.literal("start-step"),
- value: z.object({
- metadata: z.unknown()
- })
+ metadata: z.unknown().optional()
  }),
  z.object({
  type: z.literal("finish-step"),
- value: z.object({
- metadata: z.unknown()
- })
+ metadata: z.unknown().optional()
  }),
  z.object({
  type: z.literal("start"),
- value: z.object({
- messageId: z.string().optional(),
- metadata: z.unknown()
- })
+ messageId: z.string().optional(),
+ metadata: z.unknown().optional()
  }),
  z.object({
  type: z.literal("finish"),
- value: z.object({
- metadata: z.unknown()
- })
+ metadata: z.unknown().optional()
  }),
  z.object({
- type: z.literal("reasoning-part-finish"),
- value: z.null()
+ type: z.literal("reasoning-part-finish")
  })
  ]);
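
For orientation, the schema change above flattens every UI message stream part: payload fields now sit directly on the part instead of under a value wrapper, error parts carry errorText, and arbitrary "data-" parts are newly allowed. The object literals below are an illustrative sketch of shapes the new union would accept; the tool name, data-part name, and values are made up and not taken from the package.

// Illustrative only: shapes accepted by the new uiMessageStreamPartSchema.
const textPart = { type: "text", text: "Hello" }; // previously { type: "text", value: "Hello" }
const errorPart = { type: "error", errorText: "something went wrong" }; // previously carried the message under value
const toolCallPart = {
  type: "tool-call", // previously { type: "tool-call", value: { toolCallId, toolName, args } }
  toolCallId: "call-1",
  toolName: "getWeather", // hypothetical tool name
  args: { city: "Berlin" }
};
const dataPart = { type: "data-weather", id: "w-1", data: { temperature: 21 } }; // new "data-" parts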

@@ -1003,14 +985,14 @@ function processUIMessageStream({
  }) {
  return stream.pipeThrough(
  new TransformStream({
- async transform(chunk, controller) {
+ async transform(part, controller) {
  await runUpdateMessageJob(async ({ state, write }) => {
  function updateToolInvocationPart(toolCallId, invocation) {
- const part = state.message.parts.find(
- (part2) => part2.type === "tool-invocation" && part2.toolInvocation.toolCallId === toolCallId
+ const part2 = state.message.parts.find(
+ (part3) => part3.type === "tool-invocation" && part3.toolInvocation.toolCallId === toolCallId
  );
- if (part != null) {
- part.toolInvocation = invocation;
+ if (part2 != null) {
+ part2.toolInvocation = invocation;
  } else {
  state.message.parts.push({
  type: "tool-invocation",
@@ -1030,17 +1012,16 @@ function processUIMessageStream({
  state.message.metadata = mergedMetadata;
  }
  }
- const { type, value } = chunk;
- switch (type) {
+ switch (part.type) {
  case "text": {
  if (state.activeTextPart == null) {
  state.activeTextPart = {
  type: "text",
- text: value
+ text: part.text
  };
  state.message.parts.push(state.activeTextPart);
  } else {
- state.activeTextPart.text += value;
+ state.activeTextPart.text += part.text;
  }
  write();
  break;
@@ -1049,13 +1030,13 @@ function processUIMessageStream({
  if (state.activeReasoningPart == null) {
  state.activeReasoningPart = {
  type: "reasoning",
- text: value.text,
- providerMetadata: value.providerMetadata
+ text: part.text,
+ providerMetadata: part.providerMetadata
  };
  state.message.parts.push(state.activeReasoningPart);
  } else {
- state.activeReasoningPart.text += value.text;
- state.activeReasoningPart.providerMetadata = value.providerMetadata;
+ state.activeReasoningPart.text += part.text;
+ state.activeReasoningPart.providerMetadata = part.providerMetadata;
  }
  write();
  break;
@@ -1069,8 +1050,8 @@ function processUIMessageStream({
  case "file": {
  state.message.parts.push({
  type: "file",
- mediaType: value.mediaType,
- url: value.url
+ mediaType: part.mediaType,
+ url: part.url
  });
  write();
  break;
@@ -1078,39 +1059,45 @@ function processUIMessageStream({
  case "source": {
  state.message.parts.push({
  type: "source",
- source: value
+ source: {
+ sourceType: "url",
+ id: part.id,
+ url: part.url,
+ title: part.title,
+ providerMetadata: part.providerMetadata
+ }
  });
  write();
  break;
  }
  case "tool-call-streaming-start": {
  const toolInvocations = getToolInvocations(state.message);
- state.partialToolCalls[value.toolCallId] = {
+ state.partialToolCalls[part.toolCallId] = {
  text: "",
  step: state.step,
- toolName: value.toolName,
+ toolName: part.toolName,
  index: toolInvocations.length
  };
- updateToolInvocationPart(value.toolCallId, {
+ updateToolInvocationPart(part.toolCallId, {
  state: "partial-call",
  step: state.step,
- toolCallId: value.toolCallId,
- toolName: value.toolName,
+ toolCallId: part.toolCallId,
+ toolName: part.toolName,
  args: void 0
  });
  write();
  break;
  }
  case "tool-call-delta": {
- const partialToolCall = state.partialToolCalls[value.toolCallId];
- partialToolCall.text += value.argsTextDelta;
+ const partialToolCall = state.partialToolCalls[part.toolCallId];
+ partialToolCall.text += part.argsTextDelta;
  const { value: partialArgs } = await parsePartialJson(
  partialToolCall.text
  );
- updateToolInvocationPart(value.toolCallId, {
+ updateToolInvocationPart(part.toolCallId, {
  state: "partial-call",
  step: partialToolCall.step,
- toolCallId: value.toolCallId,
+ toolCallId: part.toolCallId,
  toolName: partialToolCall.toolName,
  args: partialArgs
  });
@@ -1118,22 +1105,25 @@ function processUIMessageStream({
  break;
  }
  case "tool-call": {
- const call = { args: value.args, ...value };
- updateToolInvocationPart(value.toolCallId, {
+ updateToolInvocationPart(part.toolCallId, {
  state: "call",
  step: state.step,
- ...call
+ toolCallId: part.toolCallId,
+ toolName: part.toolName,
+ args: part.args
  });
  write();
  if (onToolCall) {
  const result = await onToolCall({
- toolCall: call
+ toolCall: part
  });
  if (result != null) {
- updateToolInvocationPart(value.toolCallId, {
+ updateToolInvocationPart(part.toolCallId, {
  state: "result",
  step: state.step,
- ...call,
+ toolCallId: part.toolCallId,
+ toolName: part.toolName,
+ args: part.args,
  result
  });
  write();
@@ -1147,25 +1137,24 @@ function processUIMessageStream({
  throw new Error("tool_result must be preceded by a tool_call");
  }
  const toolInvocationIndex = toolInvocations.findIndex(
- (invocation) => invocation.toolCallId === value.toolCallId
+ (invocation) => invocation.toolCallId === part.toolCallId
  );
  if (toolInvocationIndex === -1) {
  throw new Error(
  "tool_result must be preceded by a tool_call with the same toolCallId"
  );
  }
- const result = { result: value.result, ...value };
- updateToolInvocationPart(value.toolCallId, {
+ updateToolInvocationPart(part.toolCallId, {
  ...toolInvocations[toolInvocationIndex],
  state: "result",
- ...result
+ result: part.result
  });
  write();
  break;
  }
  case "start-step": {
  state.message.parts.push({ type: "step-start" });
- await updateMessageMetadata(value.metadata);
+ await updateMessageMetadata(part.metadata);
  write();
  break;
  }
@@ -1173,45 +1162,61 @@ function processUIMessageStream({
  state.step += 1;
  state.activeTextPart = void 0;
  state.activeReasoningPart = void 0;
- await updateMessageMetadata(value.metadata);
- if (value.metadata != null) {
+ await updateMessageMetadata(part.metadata);
+ if (part.metadata != null) {
  write();
  }
  break;
  }
  case "start": {
- if (value.messageId != null) {
- state.message.id = value.messageId;
+ if (part.messageId != null) {
+ state.message.id = part.messageId;
  }
- await updateMessageMetadata(value.metadata);
- if (value.messageId != null || value.metadata != null) {
+ await updateMessageMetadata(part.metadata);
+ if (part.messageId != null || part.metadata != null) {
  write();
  }
  break;
  }
  case "finish": {
- await updateMessageMetadata(value.metadata);
- if (value.metadata != null) {
+ await updateMessageMetadata(part.metadata);
+ if (part.metadata != null) {
  write();
  }
  break;
  }
  case "metadata": {
- await updateMessageMetadata(value.metadata);
- if (value.metadata != null) {
+ await updateMessageMetadata(part.metadata);
+ if (part.metadata != null) {
  write();
  }
  break;
  }
  case "error": {
- throw new Error(value);
+ throw new Error(part.errorText);
  }
  default: {
- const _exhaustiveCheck = type;
- throw new Error(`Unhandled stream part: ${_exhaustiveCheck}`);
+ if (part.type.startsWith("data-")) {
+ const existingPart = part.id != null ? state.message.parts.find(
+ (partArg) => part.type === partArg.type && part.id === partArg.id
+ ) : void 0;
+ if (existingPart != null) {
+ existingPart.value = mergeObjects(
+ existingPart.data,
+ part.data
+ );
+ } else {
+ state.message.parts.push({
+ type: part.type,
+ id: part.id,
+ value: part.data
+ });
+ }
+ write();
+ }
  }
  }
- controller.enqueue(chunk);
+ controller.enqueue(part);
  });
  }
  })
@@ -1225,15 +1230,15 @@ function transformTextToUiMessageStream({
  return stream.pipeThrough(
  new TransformStream({
  start(controller) {
- controller.enqueue({ type: "start", value: {} });
- controller.enqueue({ type: "start-step", value: {} });
+ controller.enqueue({ type: "start" });
+ controller.enqueue({ type: "start-step" });
  },
  async transform(part, controller) {
- controller.enqueue({ type: "text", value: part });
+ controller.enqueue({ type: "text", text: part });
  },
  async flush(controller) {
- controller.enqueue({ type: "finish-step", value: {} });
- controller.enqueue({ type: "finish", value: {} });
+ controller.enqueue({ type: "finish-step" });
+ controller.enqueue({ type: "finish" });
  }
  })
  );
@@ -1453,12 +1458,12 @@ async function callCompletionApi({
  if (!part.success) {
  throw part.error;
  }
- const { type, value } = part.value;
- if (type === "text") {
- result += value;
+ const streamPart = part.value;
+ if (streamPart.type === "text") {
+ result += streamPart.text;
  setCompletion(result);
- } else if (type === "error") {
- throw new Error(value);
+ } else if (streamPart.type === "error") {
+ throw new Error(streamPart.errorText);
  }
  }
  })
@@ -1536,18 +1541,18 @@ var SerialJobExecutor = class {
  function shouldResubmitMessages({
  originalMaxToolInvocationStep,
  originalMessageCount,
- maxSteps,
+ maxSteps: maxSteps2,
  messages
  }) {
  var _a17;
  const lastMessage = messages[messages.length - 1];
  return (
  // check if the feature is enabled:
- maxSteps > 1 && // ensure there is a last message:
+ maxSteps2 > 1 && // ensure there is a last message:
  lastMessage != null && // ensure we actually have new steps (to prevent infinite loops in case of errors):
  (messages.length > originalMessageCount || extractMaxToolInvocationStep(getToolInvocations(lastMessage)) !== originalMaxToolInvocationStep) && // check that next step is possible:
  isAssistantMessageWithCompletedToolCalls(lastMessage) && // limit the number of automatic steps:
- ((_a17 = extractMaxToolInvocationStep(getToolInvocations(lastMessage))) != null ? _a17 : 0) < maxSteps
+ ((_a17 = extractMaxToolInvocationStep(getToolInvocations(lastMessage))) != null ? _a17 : 0) < maxSteps2
  );
  }
  function isAssistantMessageWithCompletedToolCalls(message) {
@@ -1588,7 +1593,7 @@ var ChatStore = class {
  generateId: generateId3,
  messageMetadataSchema,
  transport,
- maxSteps = 1
+ maxSteps: maxSteps2 = 1
  }) {
  this.chats = new Map(
  Object.entries(chats).map(([id, state]) => [
@@ -1602,7 +1607,7 @@ var ChatStore = class {
  }
  ])
  );
- this.maxSteps = maxSteps;
+ this.maxSteps = maxSteps2;
  this.transport = transport;
  this.subscribers = /* @__PURE__ */ new Set();
  this.generateId = generateId3 != null ? generateId3 : generateIdFunc;
@@ -1660,14 +1665,6 @@ var ChatStore = class {
  this.getChat(id).messages = [...messages];
  this.emit({ type: "chat-messages-changed", chatId: id });
  }
- appendMessage({
- id,
- message
- }) {
- const chat = this.getChat(id);
- chat.messages = [...chat.messages, { ...message }];
- this.emit({ type: "chat-messages-changed", chatId: id });
- }
  removeAssistantResponse(id) {
  const chat = this.getChat(id);
  const lastMessage = chat.messages[chat.messages.length - 1];
@@ -1810,6 +1807,7 @@ var ChatStore = class {
  }) {
  const self = this;
  const chat = this.getChat(chatId);
+ this.setMessages({ id: chatId, messages: chatMessages });
  this.setStatus({ id: chatId, status: "submitted", error: void 0 });
  const messageCount = chatMessages.length;
  const maxStep = extractMaxToolInvocationStep(
@@ -2154,7 +2152,7 @@ function defaultChatStore({
  prepareRequestBody,
  generateId: generateId3 = generateIdFunc2,
  messageMetadataSchema,
- maxSteps = 1,
+ maxSteps: maxSteps2 = 1,
  chats
  }) {
  return new ChatStore({
@@ -2169,7 +2167,7 @@ function defaultChatStore({
  }),
  generateId: generateId3,
  messageMetadataSchema,
- maxSteps,
+ maxSteps: maxSteps2,
  chats
  });
  }
@@ -2209,7 +2207,7 @@ function createUIMessageStream({
  safeEnqueue(value);
  }
  })().catch((error) => {
- safeEnqueue({ type: "error", value: onError(error) });
+ safeEnqueue({ type: "error", errorText: onError(error) });
  })
  );
  },
@@ -2218,12 +2216,12 @@ function createUIMessageStream({
  if (result) {
  ongoingStreamPromises.push(
  result.catch((error) => {
- safeEnqueue({ type: "error", value: onError(error) });
+ safeEnqueue({ type: "error", errorText: onError(error) });
  })
  );
  }
  } catch (error) {
- safeEnqueue({ type: "error", value: onError(error) });
+ safeEnqueue({ type: "error", errorText: onError(error) });
  }
  const waitForStreams = new Promise(async (resolve) => {
  while (ongoingStreamPromises.length > 0) {
@@ -3879,7 +3877,7 @@ async function standardizePrompt(prompt) {
  if (!validationResult.success) {
  throw new InvalidPromptError2({
  prompt,
- message: "messages must be an array of ModelMessage",
+ message: "The messages must be a ModelMessage[]. If you have passed a UIMessage[], you can use convertToModelMessages to convert them.",
  cause: validationResult.error
  });
  }
@@ -5524,6 +5522,19 @@ var DefaultStepResult = class {
  }
  };

+ // core/generate-text/stop-condition.ts
+ function maxSteps(maxSteps2) {
+ return ({ steps }) => steps.length >= maxSteps2;
+ }
+ function hasToolCall(toolName) {
+ return ({ steps }) => {
+ var _a17, _b, _c;
+ return (_c = (_b = (_a17 = steps[steps.length - 1]) == null ? void 0 : _a17.toolCalls) == null ? void 0 : _b.some(
+ (toolCall) => toolCall.toolName === toolName
+ )) != null ? _c : false;
+ };
+ }
+
  // core/generate-text/to-response-messages.ts
  function toResponseMessages({
  content: inputContent,
@@ -5597,7 +5608,7 @@ async function generateText({
  maxRetries: maxRetriesArg,
  abortSignal,
  headers,
- maxSteps = 1,
+ continueUntil = maxSteps(1),
  experimental_output: output,
  experimental_telemetry: telemetry,
  providerOptions,
@@ -5611,13 +5622,6 @@ async function generateText({
  onStepFinish,
  ...settings
  }) {
- if (maxSteps < 1) {
- throw new InvalidArgumentError({
- parameter: "maxSteps",
- value: maxSteps,
- message: "maxSteps must be at least 1"
- });
- }
  const { maxRetries, retry } = prepareRetries({ maxRetries: maxRetriesArg });
  const callSettings = prepareCallSettings(settings);
  const baseTelemetryAttributes = getBaseTelemetryAttributes({
@@ -5648,8 +5652,7 @@ async function generateText({
  // specific settings that only make sense on the outer level:
  "ai.prompt": {
  input: () => JSON.stringify({ system, prompt, messages })
- },
- "ai.settings.maxSteps": maxSteps
+ }
  }
  }),
  tracer,
@@ -5659,7 +5662,6 @@ async function generateText({
  let currentModelResponse;
  let currentToolCalls = [];
  let currentToolResults = [];
- let stepCount = 0;
  const responseMessages = [];
  const steps = [];
  do {
@@ -5670,8 +5672,7 @@ async function generateText({
  const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
  model,
  steps,
- maxSteps,
- stepNumber: stepCount
+ stepNumber: steps.length
  }));
  const promptMessages = await convertToLanguageModelPrompt({
  prompt: {
@@ -5826,9 +5827,12 @@ async function generateText({
  });
  steps.push(currentStepResult);
  await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
- } while (++stepCount < maxSteps && // there are tool calls:
- currentToolCalls.length > 0 && // all current tool calls have results:
- currentToolResults.length === currentToolCalls.length);
+ } while (
+ // there are tool calls:
+ currentToolCalls.length > 0 && // all current tool calls have results:
+ currentToolResults.length === currentToolCalls.length && // continue until the stop condition is met:
+ !await continueUntil({ steps })
+ );
  span.setAttributes(
  selectTelemetryAttributes({
  telemetry,
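
The generateText changes above replace the numeric maxSteps option with a continueUntil stop condition; the maxSteps(n) and hasToolCall(toolName) helpers added in the stop-condition hunk (and exported at the end of this diff) build such conditions. The sketch below shows how a call against this canary might look; runAgent, the prompt, and the bookFlight tool name are hypothetical, and the model and tool set are assumed to be configured elsewhere.

// Sketch only, based on the new generateText signature above.
import { generateText, maxSteps, hasToolCall } from "ai";

async function runAgent(model, tools) {
  return generateText({
    model, // any configured LanguageModel (assumed to be provided by the caller)
    tools, // tool definitions (assumed to be provided by the caller)
    prompt: "Plan the trip and call bookFlight when you are done.",
    // The tool-call loop now runs until this condition returns true
    // (previously expressed as maxSteps: 5).
    continueUntil: async ({ steps }) =>
      maxSteps(5)({ steps }) || hasToolCall("bookFlight")({ steps })
  });
}
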
@@ -6392,7 +6396,7 @@ function streamText({
  maxRetries,
  abortSignal,
  headers,
- maxSteps = 1,
+ maxSteps: maxSteps2 = 1,
  experimental_output: output,
  experimental_telemetry: telemetry,
  providerOptions,
@@ -6428,7 +6432,7 @@ function streamText({
  transforms: asArray(transform),
  activeTools,
  repairToolCall,
- maxSteps,
+ maxSteps: maxSteps2,
  output,
  providerOptions,
  onChunk,
@@ -6505,7 +6509,7 @@ var DefaultStreamTextResult = class {
  transforms,
  activeTools,
  repairToolCall,
- maxSteps,
+ maxSteps: maxSteps2,
  output,
  providerOptions,
  now: now2,
@@ -6519,10 +6523,10 @@ var DefaultStreamTextResult = class {
  this.totalUsagePromise = new DelayedPromise();
  this.finishReasonPromise = new DelayedPromise();
  this.stepsPromise = new DelayedPromise();
- if (maxSteps < 1) {
+ if (maxSteps2 < 1) {
  throw new InvalidArgumentError({
  parameter: "maxSteps",
- value: maxSteps,
+ value: maxSteps2,
  message: "maxSteps must be at least 1"
  });
  }
@@ -6679,6 +6683,13 @@ var DefaultStreamTextResult = class {
  this.addStream = stitchableStream.addStream;
  this.closeStream = stitchableStream.close;
  let stream = stitchableStream.stream;
+ stream = stream.pipeThrough(
+ new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "start" });
+ }
+ })
+ );
  for (const transform of transforms) {
  stream = stream.pipeThrough(
  transform({
@@ -6713,7 +6724,7 @@ var DefaultStreamTextResult = class {
  "ai.prompt": {
  input: () => JSON.stringify({ system, prompt, messages })
  },
- "ai.settings.maxSteps": maxSteps
+ "ai.settings.maxSteps": maxSteps2
  }
  }),
  tracer,
@@ -6851,7 +6862,6 @@ var DefaultStreamTextResult = class {
  var _a17, _b, _c, _d;
  if (chunk.type === "stream-start") {
  warnings = chunk.warnings;
- controller.enqueue({ type: "start" });
  return;
  }
  if (stepFirstChunk) {
@@ -7001,7 +7011,7 @@ var DefaultStreamTextResult = class {
  }
  });
  const combinedUsage = addLanguageModelUsage(usage, stepUsage);
- if (currentStep + 1 < maxSteps && // there are tool calls:
+ if (currentStep + 1 < maxSteps2 && // there are tool calls:
  stepToolCalls.length > 0 && // all current tool calls have results:
  stepToolResults.length === stepToolCalls.length) {
  responseMessages.push(
@@ -7186,125 +7196,122 @@ var DefaultStreamTextResult = class {
  const partType = part.type;
  switch (partType) {
  case "text": {
- controller.enqueue({ type: "text", value: part.text });
+ controller.enqueue({
+ type: "text",
+ text: part.text
+ });
  break;
  }
  case "reasoning": {
  if (sendReasoning) {
- controller.enqueue({ type: "reasoning", value: part });
+ controller.enqueue({
+ type: "reasoning",
+ text: part.text,
+ providerMetadata: part.providerMetadata
+ });
  }
  break;
  }
  case "reasoning-part-finish": {
  if (sendReasoning) {
- controller.enqueue({
- type: "reasoning-part-finish",
- value: null
- });
+ controller.enqueue({ type: "reasoning-part-finish" });
  }
  break;
  }
  case "file": {
  controller.enqueue({
  type: "file",
- value: {
- mediaType: part.file.mediaType,
- url: `data:${part.file.mediaType};base64,${part.file.base64}`
- }
+ mediaType: part.file.mediaType,
+ url: `data:${part.file.mediaType};base64,${part.file.base64}`
  });
  break;
  }
  case "source": {
  if (sendSources) {
- controller.enqueue({ type: "source", value: part });
+ controller.enqueue({
+ type: "source",
+ sourceType: part.sourceType,
+ id: part.id,
+ url: part.url,
+ title: part.title,
+ providerMetadata: part.providerMetadata
+ });
  }
  break;
  }
  case "tool-call-streaming-start": {
  controller.enqueue({
  type: "tool-call-streaming-start",
- value: {
- toolCallId: part.toolCallId,
- toolName: part.toolName
- }
+ toolCallId: part.toolCallId,
+ toolName: part.toolName
  });
  break;
  }
  case "tool-call-delta": {
  controller.enqueue({
  type: "tool-call-delta",
- value: {
- toolCallId: part.toolCallId,
- argsTextDelta: part.argsTextDelta
- }
+ toolCallId: part.toolCallId,
+ argsTextDelta: part.argsTextDelta
  });
  break;
  }
  case "tool-call": {
  controller.enqueue({
  type: "tool-call",
- value: {
- toolCallId: part.toolCallId,
- toolName: part.toolName,
- args: part.args
- }
+ toolCallId: part.toolCallId,
+ toolName: part.toolName,
+ args: part.args
  });
  break;
  }
  case "tool-result": {
  controller.enqueue({
  type: "tool-result",
- value: {
- toolCallId: part.toolCallId,
- result: part.result
- }
+ toolCallId: part.toolCallId,
+ result: part.result
  });
  break;
  }
  case "error": {
  controller.enqueue({
  type: "error",
- value: onError(part.error)
+ errorText: onError(part.error)
  });
  break;
  }
  case "start-step": {
+ const metadata = messageMetadata == null ? void 0 : messageMetadata({ part });
  controller.enqueue({
  type: "start-step",
- value: {
- metadata: messageMetadata == null ? void 0 : messageMetadata({ part })
- }
+ metadata
  });
  break;
  }
  case "finish-step": {
+ const metadata = messageMetadata == null ? void 0 : messageMetadata({ part });
  controller.enqueue({
  type: "finish-step",
- value: {
- metadata: messageMetadata == null ? void 0 : messageMetadata({ part })
- }
+ metadata
  });
  break;
  }
  case "start": {
  if (experimental_sendStart) {
+ const metadata = messageMetadata == null ? void 0 : messageMetadata({ part });
  controller.enqueue({
  type: "start",
- value: {
- messageId,
- metadata: messageMetadata == null ? void 0 : messageMetadata({ part })
- }
+ messageId,
+ metadata
  });
  }
  break;
  }
  case "finish": {
  if (experimental_sendFinish) {
+ const metadata = messageMetadata == null ? void 0 : messageMetadata({ part });
  controller.enqueue({
  type: "finish",
- value: {
- metadata: messageMetadata == null ? void 0 : messageMetadata({ part })
- }
+ metadata
  });
  }
  break;
@@ -8494,9 +8501,11 @@ export {
  generateText,
  getTextFromDataUrl,
  getToolInvocations,
+ hasToolCall,
  isAssistantMessageWithCompletedToolCalls,
  isDeepEqualData,
  jsonSchema2 as jsonSchema,
+ maxSteps,
  modelMessageSchema,
  parsePartialJson,
  pipeTextStreamToResponse,