ai 3.4.17 → 3.4.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -96,7 +96,7 @@ __export(streams_exports, {
  });
  module.exports = __toCommonJS(streams_exports);
  var import_ui_utils10 = require("@ai-sdk/ui-utils");
- var import_provider_utils11 = require("@ai-sdk/provider-utils");
+ var import_provider_utils10 = require("@ai-sdk/provider-utils");

  // core/index.ts
  var import_ui_utils6 = require("@ai-sdk/ui-utils");
@@ -676,10 +676,7 @@ var DefaultEmbedManyResult = class {
  };

  // core/generate-object/generate-object.ts
- var import_provider_utils6 = require("@ai-sdk/provider-utils");
-
- // core/prompt/convert-to-language-model-prompt.ts
- var import_provider_utils3 = require("@ai-sdk/provider-utils");
+ var import_provider_utils5 = require("@ai-sdk/provider-utils");

  // util/download-error.ts
  var import_provider3 = require("@ai-sdk/provider");
@@ -923,36 +920,21 @@ function splitDataUrl(dataUrl) {
  async function convertToLanguageModelPrompt({
  prompt,
  modelSupportsImageUrls = true,
+ modelSupportsUrl = () => false,
  downloadImplementation = download
  }) {
- const languageModelMessages = [];
- if (prompt.system != null) {
- languageModelMessages.push({ role: "system", content: prompt.system });
- }
- const downloadedAssets = modelSupportsImageUrls || prompt.messages == null ? null : await downloadAssets(prompt.messages, downloadImplementation);
- const promptType = prompt.type;
- switch (promptType) {
- case "prompt": {
- languageModelMessages.push({
- role: "user",
- content: [{ type: "text", text: prompt.prompt }]
- });
- break;
- }
- case "messages": {
- languageModelMessages.push(
- ...prompt.messages.map(
- (message) => convertToLanguageModelMessage(message, downloadedAssets)
- )
- );
- break;
- }
- default: {
- const _exhaustiveCheck = promptType;
- throw new Error(`Unsupported prompt type: ${_exhaustiveCheck}`);
- }
- }
- return languageModelMessages;
+ const downloadedAssets = await downloadAssets(
+ prompt.messages,
+ downloadImplementation,
+ modelSupportsImageUrls,
+ modelSupportsUrl
+ );
+ return [
+ ...prompt.system != null ? [{ role: "system", content: prompt.system }] : [],
+ ...prompt.messages.map(
+ (message) => convertToLanguageModelMessage(message, downloadedAssets)
+ )
+ ];
  }
  function convertToLanguageModelMessage(message, downloadedAssets) {
  const role = message.role;
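The new `modelSupportsUrl` option lets a model declare, per URL, that it can fetch an asset itself, in which case the URL is passed through instead of being downloaded (see the `downloadAssets` hunk below). A minimal sketch of a custom model that keeps HTTPS URLs; the model object and its other fields are illustrative assumptions, not part of this diff:

  const myModel = {
    modelId: "my-model", // hypothetical custom model
    supportsImageUrls: false,
    // called for each image/file URL; returning true skips the download
    // and forwards the URL to the provider unchanged:
    supportsUrl(url) {
      return url.protocol === "https:";
    },
    // ...doGenerate, doStream, and the other model fields
  };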
@@ -974,178 +956,7 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
  }
  return {
  role: "user",
- content: message.content.map(
- (part) => {
- var _a11, _b, _c, _d, _e;
- switch (part.type) {
- case "text": {
- return {
- type: "text",
- text: part.text,
- providerMetadata: part.experimental_providerMetadata
- };
- }
- case "image": {
- if (part.image instanceof URL) {
- if (downloadedAssets == null) {
- return {
- type: "image",
- image: part.image,
- mimeType: part.mimeType,
- providerMetadata: part.experimental_providerMetadata
- };
- } else {
- const downloadedImage = downloadedAssets[part.image.toString()];
- return {
- type: "image",
- image: downloadedImage.data,
- mimeType: (_a11 = part.mimeType) != null ? _a11 : downloadedImage.mimeType,
- providerMetadata: part.experimental_providerMetadata
- };
- }
- }
- if (typeof part.image === "string") {
- try {
- const url = new URL(part.image);
- switch (url.protocol) {
- case "http:":
- case "https:": {
- if (downloadedAssets == null) {
- return {
- type: "image",
- image: url,
- mimeType: part.mimeType,
- providerMetadata: part.experimental_providerMetadata
- };
- } else {
- const downloadedImage = downloadedAssets[url.toString()];
- return {
- type: "image",
- image: downloadedImage.data,
- mimeType: (_b = part.mimeType) != null ? _b : downloadedImage.mimeType,
- providerMetadata: part.experimental_providerMetadata
- };
- }
- }
- case "data:": {
- try {
- const { mimeType, base64Content } = splitDataUrl(
- part.image
- );
- if (mimeType == null || base64Content == null) {
- throw new Error("Invalid data URL format");
- }
- return {
- type: "image",
- image: convertDataContentToUint8Array(base64Content),
- mimeType,
- providerMetadata: part.experimental_providerMetadata
- };
- } catch (error) {
- throw new Error(
- `Error processing data URL: ${(0, import_provider_utils3.getErrorMessage)(
- message
- )}`
- );
- }
- }
- }
- } catch (_ignored) {
- }
- }
- const imageUint8 = convertDataContentToUint8Array(part.image);
- return {
- type: "image",
- image: imageUint8,
- mimeType: (_c = part.mimeType) != null ? _c : detectImageMimeType(imageUint8),
- providerMetadata: part.experimental_providerMetadata
- };
- }
- case "file": {
- if (part.data instanceof URL) {
- if (downloadedAssets == null) {
- return {
- type: "file",
- data: part.data,
- mimeType: part.mimeType,
- providerMetadata: part.experimental_providerMetadata
- };
- } else {
- const downloadedImage = downloadedAssets[part.data.toString()];
- return {
- type: "file",
- data: (0, import_provider_utils3.convertUint8ArrayToBase64)(downloadedImage.data),
- mimeType: (_d = part.mimeType) != null ? _d : downloadedImage.mimeType,
- providerMetadata: part.experimental_providerMetadata
- };
- }
- }
- if (typeof part.data === "string") {
- try {
- const url = new URL(part.data);
- switch (url.protocol) {
- case "http:":
- case "https:": {
- if (downloadedAssets == null) {
- return {
- type: "file",
- data: url,
- mimeType: part.mimeType,
- providerMetadata: part.experimental_providerMetadata
- };
- } else {
- const downloadedImage = downloadedAssets[url.toString()];
- return {
- type: "file",
- data: (0, import_provider_utils3.convertUint8ArrayToBase64)(
- downloadedImage.data
- ),
- mimeType: (_e = part.mimeType) != null ? _e : downloadedImage.mimeType,
- providerMetadata: part.experimental_providerMetadata
- };
- }
- }
- case "data:": {
- try {
- const { mimeType, base64Content } = splitDataUrl(
- part.data
- );
- if (mimeType == null || base64Content == null) {
- throw new Error("Invalid data URL format");
- }
- return {
- type: "file",
- data: convertDataContentToBase64String(
- base64Content
- ),
- mimeType,
- providerMetadata: part.experimental_providerMetadata
- };
- } catch (error) {
- throw new Error(
- `Error processing data URL: ${(0, import_provider_utils3.getErrorMessage)(
- message
- )}`
- );
- }
- }
- }
- } catch (_ignored) {
- }
- }
- const imageBase64 = convertDataContentToBase64String(
- part.data
- );
- return {
- type: "file",
- data: imageBase64,
- mimeType: part.mimeType,
- providerMetadata: part.experimental_providerMetadata
- };
- }
- }
- }
- ).filter((part) => part.type !== "text" || part.text !== ""),
+ content: message.content.map((part) => convertPartToLanguageModelPart(part, downloadedAssets)).filter((part) => part.type !== "text" || part.text !== ""),
  providerMetadata: message.experimental_providerMetadata
  };
  }
@@ -1180,6 +991,8 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
  toolCallId: part.toolCallId,
  toolName: part.toolName,
  result: part.result,
+ content: part.experimental_content,
+ isError: part.isError,
  providerMetadata: part.experimental_providerMetadata
  })),
  providerMetadata: message.experimental_providerMetadata
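Tool-result parts now forward the optional `experimental_content` and `isError` fields to the language model. A sketch of a tool message using them, with illustrative values:

  const toolMessage = {
    role: "tool",
    content: [
      {
        type: "tool-result",
        toolCallId: "call-1",
        toolName: "screenshot",
        result: "ok",
        // experimental multi-part result content (text and/or images):
        experimental_content: [
          { type: "image", data: "iVBORw0KGgo...", mimeType: "image/png" }
        ],
        isError: false
      }
    ]
  };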
@@ -1191,17 +1004,19 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
  }
  }
  }
- async function downloadAssets(messages, downloadImplementation) {
+ async function downloadAssets(messages, downloadImplementation, modelSupportsImageUrls, modelSupportsUrl) {
  const urls = messages.filter((message) => message.role === "user").map((message) => message.content).filter(
  (content) => Array.isArray(content)
  ).flat().filter(
  (part) => part.type === "image" || part.type === "file"
+ ).filter(
+ (part) => !(part.type === "image" && modelSupportsImageUrls === true)
  ).map((part) => part.type === "image" ? part.image : part.data).map(
  (part) => (
  // support string urls:
  typeof part === "string" && (part.startsWith("http:") || part.startsWith("https:")) ? new URL(part) : part
  )
- ).filter((image) => image instanceof URL);
+ ).filter((image) => image instanceof URL).filter((url) => !modelSupportsUrl(url));
  const downloadedImages = await Promise.all(
  urls.map(async (url) => ({
  url,
@@ -1212,6 +1027,79 @@ async function downloadAssets(messages, downloadImplementation) {
  downloadedImages.map(({ url, data }) => [url.toString(), data])
  );
  }
+ function convertPartToLanguageModelPart(part, downloadedAssets) {
+ if (part.type === "text") {
+ return {
+ type: "text",
+ text: part.text,
+ providerMetadata: part.experimental_providerMetadata
+ };
+ }
+ let mimeType = part.mimeType;
+ let data;
+ let content;
+ let normalizedData;
+ const type = part.type;
+ switch (type) {
+ case "image":
+ data = part.image;
+ break;
+ case "file":
+ data = part.data;
+ break;
+ default:
+ throw new Error(`Unsupported part type: ${type}`);
+ }
+ try {
+ content = typeof data === "string" ? new URL(data) : data;
+ } catch (error) {
+ content = data;
+ }
+ if (content instanceof URL) {
+ if (content.protocol === "data:") {
+ const { mimeType: dataUrlMimeType, base64Content } = splitDataUrl(
+ content.toString()
+ );
+ if (dataUrlMimeType == null || base64Content == null) {
+ throw new Error(`Invalid data URL format in part ${type}`);
+ }
+ mimeType = dataUrlMimeType;
+ normalizedData = convertDataContentToUint8Array(base64Content);
+ } else {
+ const downloadedFile = downloadedAssets[content.toString()];
+ if (downloadedFile) {
+ normalizedData = downloadedFile.data;
+ mimeType != null ? mimeType : mimeType = downloadedFile.mimeType;
+ } else {
+ normalizedData = content;
+ }
+ }
+ } else {
+ normalizedData = convertDataContentToUint8Array(content);
+ }
+ switch (type) {
+ case "image":
+ if (mimeType == null && normalizedData instanceof Uint8Array) {
+ mimeType = detectImageMimeType(normalizedData);
+ }
+ return {
+ type: "image",
+ image: normalizedData,
+ mimeType,
+ providerMetadata: part.experimental_providerMetadata
+ };
+ case "file":
+ if (mimeType == null) {
+ throw new Error(`Mime type is missing for file part`);
+ }
+ return {
+ type: "file",
+ data: normalizedData instanceof Uint8Array ? convertDataContentToBase64String(normalizedData) : normalizedData,
+ mimeType,
+ providerMetadata: part.experimental_providerMetadata
+ };
+ }
+ }

  // errors/invalid-argument-error.ts
  var import_provider6 = require("@ai-sdk/provider");
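The new `convertPartToLanguageModelPart` centralizes the per-part logic that the removed hunk above inlined: text parts pass through, data URLs are decoded to `Uint8Array` with the mime type taken from the URL, downloaded assets are substituted for their URLs, and image mime types fall back to signature detection. Illustrative input/output pairs, inferred from the code above:

  // data-URL image part: decoded to bytes, mime type from the URL
  const dataUrlPart = { type: "image", image: "data:image/png;base64,iVBORw0KGgo=" };
  // -> { type: "image", image: Uint8Array(...), mimeType: "image/png" }

  // http(s) URL that was neither downloaded nor a data URL: passed through
  const urlPart = { type: "image", image: new URL("https://example.com/cat.png") };
  // -> { type: "image", image: URL("https://example.com/cat.png"), mimeType: undefined }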
@@ -1365,13 +1253,13 @@ function prepareCallSettings({
  };
  }

- // core/prompt/validate-prompt.ts
+ // core/prompt/standardize-prompt.ts
  var import_provider7 = require("@ai-sdk/provider");
- var import_provider_utils4 = require("@ai-sdk/provider-utils");
- var import_zod6 = require("zod");
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
+ var import_zod7 = require("zod");

  // core/prompt/message.ts
- var import_zod5 = require("zod");
+ var import_zod6 = require("zod");

  // core/types/provider-metadata.ts
  var import_zod3 = require("zod");
@@ -1396,75 +1284,91 @@ var providerMetadataSchema = import_zod3.z.record(
  );

  // core/prompt/content-part.ts
+ var import_zod5 = require("zod");
+
+ // core/prompt/tool-result-content.ts
  var import_zod4 = require("zod");
- var textPartSchema = import_zod4.z.object({
- type: import_zod4.z.literal("text"),
- text: import_zod4.z.string(),
+ var toolResultContentSchema = import_zod4.z.array(
+ import_zod4.z.union([
+ import_zod4.z.object({ type: import_zod4.z.literal("text"), text: import_zod4.z.string() }),
+ import_zod4.z.object({
+ type: import_zod4.z.literal("image"),
+ data: import_zod4.z.string(),
+ mimeType: import_zod4.z.string().optional()
+ })
+ ])
+ );
+
+ // core/prompt/content-part.ts
+ var textPartSchema = import_zod5.z.object({
+ type: import_zod5.z.literal("text"),
+ text: import_zod5.z.string(),
  experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var imagePartSchema = import_zod4.z.object({
- type: import_zod4.z.literal("image"),
- image: import_zod4.z.union([dataContentSchema, import_zod4.z.instanceof(URL)]),
- mimeType: import_zod4.z.string().optional(),
+ var imagePartSchema = import_zod5.z.object({
+ type: import_zod5.z.literal("image"),
+ image: import_zod5.z.union([dataContentSchema, import_zod5.z.instanceof(URL)]),
+ mimeType: import_zod5.z.string().optional(),
  experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var filePartSchema = import_zod4.z.object({
- type: import_zod4.z.literal("file"),
- data: import_zod4.z.union([dataContentSchema, import_zod4.z.instanceof(URL)]),
- mimeType: import_zod4.z.string(),
+ var filePartSchema = import_zod5.z.object({
+ type: import_zod5.z.literal("file"),
+ data: import_zod5.z.union([dataContentSchema, import_zod5.z.instanceof(URL)]),
+ mimeType: import_zod5.z.string(),
  experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var toolCallPartSchema = import_zod4.z.object({
- type: import_zod4.z.literal("tool-call"),
- toolCallId: import_zod4.z.string(),
- toolName: import_zod4.z.string(),
- args: import_zod4.z.unknown()
+ var toolCallPartSchema = import_zod5.z.object({
+ type: import_zod5.z.literal("tool-call"),
+ toolCallId: import_zod5.z.string(),
+ toolName: import_zod5.z.string(),
+ args: import_zod5.z.unknown()
  });
- var toolResultPartSchema = import_zod4.z.object({
- type: import_zod4.z.literal("tool-result"),
- toolCallId: import_zod4.z.string(),
- toolName: import_zod4.z.string(),
- result: import_zod4.z.unknown(),
- isError: import_zod4.z.boolean().optional(),
+ var toolResultPartSchema = import_zod5.z.object({
+ type: import_zod5.z.literal("tool-result"),
+ toolCallId: import_zod5.z.string(),
+ toolName: import_zod5.z.string(),
+ result: import_zod5.z.unknown(),
+ content: toolResultContentSchema.optional(),
+ isError: import_zod5.z.boolean().optional(),
  experimental_providerMetadata: providerMetadataSchema.optional()
  });

  // core/prompt/message.ts
- var coreSystemMessageSchema = import_zod5.z.object({
- role: import_zod5.z.literal("system"),
- content: import_zod5.z.string(),
+ var coreSystemMessageSchema = import_zod6.z.object({
+ role: import_zod6.z.literal("system"),
+ content: import_zod6.z.string(),
  experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var coreUserMessageSchema = import_zod5.z.object({
- role: import_zod5.z.literal("user"),
- content: import_zod5.z.union([
- import_zod5.z.string(),
- import_zod5.z.array(import_zod5.z.union([textPartSchema, imagePartSchema, filePartSchema]))
+ var coreUserMessageSchema = import_zod6.z.object({
+ role: import_zod6.z.literal("user"),
+ content: import_zod6.z.union([
+ import_zod6.z.string(),
+ import_zod6.z.array(import_zod6.z.union([textPartSchema, imagePartSchema, filePartSchema]))
  ]),
  experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var coreAssistantMessageSchema = import_zod5.z.object({
- role: import_zod5.z.literal("assistant"),
- content: import_zod5.z.union([
- import_zod5.z.string(),
- import_zod5.z.array(import_zod5.z.union([textPartSchema, toolCallPartSchema]))
+ var coreAssistantMessageSchema = import_zod6.z.object({
+ role: import_zod6.z.literal("assistant"),
+ content: import_zod6.z.union([
+ import_zod6.z.string(),
+ import_zod6.z.array(import_zod6.z.union([textPartSchema, toolCallPartSchema]))
  ]),
  experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var coreToolMessageSchema = import_zod5.z.object({
- role: import_zod5.z.literal("tool"),
- content: import_zod5.z.array(toolResultPartSchema),
+ var coreToolMessageSchema = import_zod6.z.object({
+ role: import_zod6.z.literal("tool"),
+ content: import_zod6.z.array(toolResultPartSchema),
  experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var coreMessageSchema = import_zod5.z.union([
+ var coreMessageSchema = import_zod6.z.union([
  coreSystemMessageSchema,
  coreUserMessageSchema,
  coreAssistantMessageSchema,
  coreToolMessageSchema
  ]);

- // core/prompt/validate-prompt.ts
- function validatePrompt(prompt) {
+ // core/prompt/standardize-prompt.ts
+ function standardizePrompt(prompt) {
  if (prompt.prompt == null && prompt.messages == null) {
  throw new import_provider7.InvalidPromptError({
  prompt,
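The new `toolResultContentSchema` in the hunk above validates experimental multi-part tool-result content: an array of text and image entries. A value that passes the schema, with illustrative data:

  const toolResultContent = [
    { type: "text", text: "Here is the screenshot:" },
    { type: "image", data: "iVBORw0KGgoAAAANSUhEUg...", mimeType: "image/png" }
  ];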
@@ -1492,15 +1396,19 @@ function validatePrompt(prompt) {
  }
  return {
  type: "prompt",
- prompt: prompt.prompt,
- messages: void 0,
- system: prompt.system
+ system: prompt.system,
+ messages: [
+ {
+ role: "user",
+ content: prompt.prompt
+ }
+ ]
  };
  }
  if (prompt.messages != null) {
- const validationResult = (0, import_provider_utils4.safeValidateTypes)({
+ const validationResult = (0, import_provider_utils3.safeValidateTypes)({
  value: prompt.messages,
- schema: import_zod6.z.array(coreMessageSchema)
+ schema: import_zod7.z.array(coreMessageSchema)
  });
  if (!validationResult.success) {
  throw new import_provider7.InvalidPromptError({
@@ -1511,7 +1419,6 @@ function validatePrompt(prompt) {
  }
  return {
  type: "messages",
- prompt: void 0,
  messages: prompt.messages,
  // only possible case bc of checks above
  system: prompt.system
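`standardizePrompt` (renamed from `validatePrompt`) no longer returns a bare `prompt` string; a string prompt is wrapped in a single user message, so downstream code only ever consumes `messages`. Return shapes per the code above, with illustrative inputs:

  standardizePrompt({ system: "be brief", prompt: "hi" });
  // -> { type: "prompt", system: "be brief",
  //      messages: [{ role: "user", content: "hi" }] }

  standardizePrompt({ messages: [{ role: "user", content: "hi" }] });
  // -> { type: "messages", messages: [{ role: "user", content: "hi" }],
  //      system: undefined }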
@@ -1602,7 +1509,7 @@ _a6 = symbol6;

  // core/generate-object/output-strategy.ts
  var import_provider9 = require("@ai-sdk/provider");
- var import_provider_utils5 = require("@ai-sdk/provider-utils");
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
  var import_ui_utils = require("@ai-sdk/ui-utils");

  // core/util/async-iterable-stream.ts
@@ -1652,7 +1559,7 @@ var objectOutputStrategy = (schema) => ({
  };
  },
  validateFinalResult(value) {
- return (0, import_provider_utils5.safeValidateTypes)({ value, schema });
+ return (0, import_provider_utils4.safeValidateTypes)({ value, schema });
  },
  createElementStream() {
  throw new import_provider9.UnsupportedFunctionalityError({
@@ -1691,7 +1598,7 @@ var arrayOutputStrategy = (schema) => {
  const resultArray = [];
  for (let i = 0; i < inputArray.length; i++) {
  const element = inputArray[i];
- const result = (0, import_provider_utils5.safeValidateTypes)({ value: element, schema });
+ const result = (0, import_provider_utils4.safeValidateTypes)({ value: element, schema });
  if (i === inputArray.length - 1 && !isFinalDelta) {
  continue;
  }
@@ -1732,7 +1639,7 @@ var arrayOutputStrategy = (schema) => {
  }
  const inputArray = value.elements;
  for (const element of inputArray) {
- const result = (0, import_provider_utils5.safeValidateTypes)({ value: element, schema });
+ const result = (0, import_provider_utils4.safeValidateTypes)({ value: element, schema });
  if (!result.success) {
  return result;
  }
@@ -1961,7 +1868,7 @@ function validateObjectGenerationInput({
  }

  // core/generate-object/generate-object.ts
- var originalGenerateId = (0, import_provider_utils6.createIdGenerator)({ prefix: "aiobj-", size: 24 });
+ var originalGenerateId = (0, import_provider_utils5.createIdGenerator)({ prefix: "aiobj", size: 24 });
  async function generateObject({
  model,
  enum: enumValues,
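The ID prefix changes from "aiobj-" to "aiobj" here and in the matching stream-object/generate-text/stream-text hunks below. A plausible reading, stated as an assumption rather than confirmed by this diff, is that `createIdGenerator` in `@ai-sdk/provider-utils` now inserts the separator itself, so generated IDs keep the `aiobj-...` shape:

  const genId = createIdGenerator({ prefix: "aiobj", size: 24 });
  genId(); // e.g. "aiobj-1aB2..." (separator assumed to come from the generator)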
@@ -2031,6 +1938,7 @@ async function generateObject({
  }),
  tracer,
  fn: async (span) => {
+ var _a11, _b;
  const retry = retryWithExponentialBackoff({ maxRetries });
  if (mode === "auto" || mode == null) {
  mode = model.defaultObjectGenerationMode;
@@ -2041,11 +1949,12 @@ async function generateObject({
  let warnings;
  let rawResponse;
  let response;
+ let request;
  let logprobs;
  let resultProviderMetadata;
  switch (mode) {
  case "json": {
- const validatedPrompt = validatePrompt({
+ const standardPrompt = standardizePrompt({
  system: outputStrategy.jsonSchema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs ? system : injectJsonInstruction({
  prompt: system,
  schema: outputStrategy.jsonSchema
@@ -2054,10 +1963,10 @@ async function generateObject({
  messages
  });
  const promptMessages = await convertToLanguageModelPrompt({
- prompt: validatedPrompt,
- modelSupportsImageUrls: model.supportsImageUrls
+ prompt: standardPrompt,
+ modelSupportsImageUrls: model.supportsImageUrls,
+ modelSupportsUrl: model.supportsUrl
  });
- const inputFormat = validatedPrompt.type;
  const generateResult = await retry(
  () => recordSpan({
  name: "ai.generateObject.doGenerate",
@@ -2070,7 +1979,7 @@ async function generateObject({
  }),
  ...baseTelemetryAttributes,
  "ai.prompt.format": {
- input: () => inputFormat
+ input: () => standardPrompt.type
  },
  "ai.prompt.messages": {
  input: () => JSON.stringify(promptMessages)
@@ -2089,7 +1998,7 @@ async function generateObject({
  }),
  tracer,
  fn: async (span2) => {
- var _a11, _b, _c, _d, _e, _f;
+ var _a12, _b2, _c, _d, _e, _f;
  const result2 = await model.doGenerate({
  mode: {
  type: "object-json",
@@ -2098,7 +2007,7 @@ async function generateObject({
  description: schemaDescription
  },
  ...prepareCallSettings(settings),
- inputFormat,
+ inputFormat: standardPrompt.type,
  prompt: promptMessages,
  providerMetadata,
  abortSignal,
@@ -2108,7 +2017,7 @@ async function generateObject({
  throw new NoObjectGeneratedError();
  }
  const responseData = {
- id: (_b = (_a11 = result2.response) == null ? void 0 : _a11.id) != null ? _b : generateId3(),
+ id: (_b2 = (_a12 = result2.response) == null ? void 0 : _a12.id) != null ? _b2 : generateId3(),
  timestamp: (_d = (_c = result2.response) == null ? void 0 : _c.timestamp) != null ? _d : currentDate(),
  modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId
  };
@@ -2146,18 +2055,20 @@ async function generateObject({
  rawResponse = generateResult.rawResponse;
  logprobs = generateResult.logprobs;
  resultProviderMetadata = generateResult.providerMetadata;
+ request = (_a11 = generateResult.request) != null ? _a11 : {};
  response = generateResult.responseData;
  break;
  }
  case "tool": {
- const validatedPrompt = validatePrompt({
+ const validatedPrompt = standardizePrompt({
  system,
  prompt,
  messages
  });
  const promptMessages = await convertToLanguageModelPrompt({
  prompt: validatedPrompt,
- modelSupportsImageUrls: model.supportsImageUrls
+ modelSupportsImageUrls: model.supportsImageUrls,
+ modelSupportsUrl: model.supportsUrl
  });
  const inputFormat = validatedPrompt.type;
  const generateResult = await retry(
@@ -2191,7 +2102,7 @@ async function generateObject({
  }),
  tracer,
  fn: async (span2) => {
- var _a11, _b, _c, _d, _e, _f, _g, _h;
+ var _a12, _b2, _c, _d, _e, _f, _g, _h;
  const result2 = await model.doGenerate({
  mode: {
  type: "object-tool",
@@ -2209,7 +2120,7 @@ async function generateObject({
  abortSignal,
  headers
  });
- const objectText = (_b = (_a11 = result2.toolCalls) == null ? void 0 : _a11[0]) == null ? void 0 : _b.args;
+ const objectText = (_b2 = (_a12 = result2.toolCalls) == null ? void 0 : _a12[0]) == null ? void 0 : _b2.args;
  if (objectText === void 0) {
  throw new NoObjectGeneratedError();
  }
@@ -2252,6 +2163,7 @@ async function generateObject({
  rawResponse = generateResult.rawResponse;
  logprobs = generateResult.logprobs;
  resultProviderMetadata = generateResult.providerMetadata;
+ request = (_b = generateResult.request) != null ? _b : {};
  response = generateResult.responseData;
  break;
  }
@@ -2265,7 +2177,7 @@ async function generateObject({
  throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
  }
  }
- const parseResult = (0, import_provider_utils6.safeParseJSON)({ text: result });
+ const parseResult = (0, import_provider_utils5.safeParseJSON)({ text: result });
  if (!parseResult.success) {
  throw parseResult.error;
  }
@@ -2298,6 +2210,7 @@ async function generateObject({
  finishReason,
  usage: calculateLanguageModelUsage(usage),
  warnings,
+ request,
  response: {
  ...response,
  headers: rawResponse == null ? void 0 : rawResponse.headers
@@ -2316,6 +2229,7 @@ var DefaultGenerateObjectResult = class {
  this.warnings = options.warnings;
  this.experimental_providerMetadata = options.providerMetadata;
  this.response = options.response;
+ this.request = options.request;
  this.rawResponse = {
  headers: options.response.headers
  };
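`generateObject` results now expose the provider request alongside the response, with `request` defaulting to `{}` when the model does not report one. A caller-side sketch, assuming `model` and `schema` are defined elsewhere:

  import { generateObject } from "ai";

  const { object, request, response } = await generateObject({
    model,
    schema,
    prompt: "Generate a lasagna recipe."
  });
  // raw provider request metadata, e.g. the HTTP body when exposed:
  console.log(request.body);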
@@ -2334,7 +2248,7 @@ var DefaultGenerateObjectResult = class {
  var experimental_generateObject = generateObject;

  // core/generate-object/stream-object.ts
- var import_provider_utils7 = require("@ai-sdk/provider-utils");
+ var import_provider_utils6 = require("@ai-sdk/provider-utils");
  var import_ui_utils2 = require("@ai-sdk/ui-utils");

  // util/create-resolvable-promise.ts
@@ -2444,7 +2358,7 @@ function writeToServerResponse({
  }

  // core/generate-object/stream-object.ts
- var originalGenerateId2 = (0, import_provider_utils7.createIdGenerator)({ prefix: "aiobj-", size: 24 });
+ var originalGenerateId2 = (0, import_provider_utils6.createIdGenerator)({ prefix: "aiobj", size: 24 });
  async function streamObject({
  model,
  schema: inputSchema,
@@ -2518,7 +2432,7 @@ async function streamObject({
  let transformer;
  switch (mode) {
  case "json": {
- const validatedPrompt = validatePrompt({
+ const standardPrompt = standardizePrompt({
  system: outputStrategy.jsonSchema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs ? system : injectJsonInstruction({
  prompt: system,
  schema: outputStrategy.jsonSchema
@@ -2534,10 +2448,11 @@ async function streamObject({
  description: schemaDescription
  },
  ...prepareCallSettings(settings),
- inputFormat: validatedPrompt.type,
+ inputFormat: standardPrompt.type,
  prompt: await convertToLanguageModelPrompt({
- prompt: validatedPrompt,
- modelSupportsImageUrls: model.supportsImageUrls
+ prompt: standardPrompt,
+ modelSupportsImageUrls: model.supportsImageUrls,
+ modelSupportsUrl: model.supportsUrl
  }),
  providerMetadata,
  abortSignal,
@@ -2560,7 +2475,7 @@ async function streamObject({
  break;
  }
  case "tool": {
- const validatedPrompt = validatePrompt({
+ const validatedPrompt = standardizePrompt({
  system,
  prompt,
  messages
@@ -2579,7 +2494,8 @@ async function streamObject({
  inputFormat: validatedPrompt.type,
  prompt: await convertToLanguageModelPrompt({
  prompt: validatedPrompt,
- modelSupportsImageUrls: model.supportsImageUrls
+ modelSupportsImageUrls: model.supportsImageUrls,
+ modelSupportsUrl: model.supportsUrl
  }),
  providerMetadata,
  abortSignal,
@@ -2612,7 +2528,7 @@ async function streamObject({
  }
  }
  const {
- result: { stream, warnings, rawResponse },
+ result: { stream, warnings, rawResponse, request },
  doStreamSpan,
  startTimestampMs
  } = await retry(
@@ -2658,6 +2574,7 @@ async function streamObject({
  stream: stream.pipeThrough(new TransformStream(transformer)),
  warnings,
  rawResponse,
+ request: request != null ? request : {},
  onFinish,
  rootSpan,
  doStreamSpan,
@@ -2676,6 +2593,7 @@ var DefaultStreamObjectResult = class {
  stream,
  warnings,
  rawResponse,
+ request,
  outputStrategy,
  onFinish,
  rootSpan,
@@ -2690,6 +2608,7 @@ var DefaultStreamObjectResult = class {
  this.warnings = warnings;
  this.rawResponse = rawResponse;
  this.outputStrategy = outputStrategy;
+ this.request = Promise.resolve(request);
  this.objectPromise = new DelayedPromise();
  const { resolve: resolveUsage, promise: usagePromise } = createResolvablePromise();
  this.usage = usagePromise;
@@ -2947,7 +2866,7 @@ var DefaultStreamObjectResult = class {
  var experimental_streamObject = streamObject;

  // core/generate-text/generate-text.ts
- var import_provider_utils9 = require("@ai-sdk/provider-utils");
+ var import_provider_utils8 = require("@ai-sdk/provider-utils");

  // errors/index.ts
  var import_provider13 = require("@ai-sdk/provider");
@@ -3083,12 +3002,30 @@ function prepareToolsAndToolChoice({
  ([name11]) => activeTools.includes(name11)
  ) : Object.entries(tools);
  return {
- tools: filteredTools.map(([name11, tool2]) => ({
- type: "function",
- name: name11,
- description: tool2.description,
- parameters: (0, import_ui_utils3.asSchema)(tool2.parameters).jsonSchema
- })),
+ tools: filteredTools.map(([name11, tool2]) => {
+ const toolType = tool2.type;
+ switch (toolType) {
+ case void 0:
+ case "function":
+ return {
+ type: "function",
+ name: name11,
+ description: tool2.description,
+ parameters: (0, import_ui_utils3.asSchema)(tool2.parameters).jsonSchema
+ };
+ case "provider-defined":
+ return {
+ type: "provider-defined",
+ name: name11,
+ id: tool2.id,
+ args: tool2.args
+ };
+ default: {
+ const exhaustiveCheck = toolType;
+ throw new Error(`Unsupported tool type: ${exhaustiveCheck}`);
+ }
+ }
+ }),
  toolChoice: toolChoice == null ? { type: "auto" } : typeof toolChoice === "string" ? { type: toolChoice } : { type: "tool", toolName: toolChoice.toolName }
  };
  }
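`prepareToolsAndToolChoice` now distinguishes plain function tools (type `undefined` or "function") from provider-defined tools, which are forwarded with their provider `id` and `args` instead of a JSON schema. A sketch of the two tool shapes the switch accepts, with illustrative names and arguments:

  import { z } from "zod";

  const tools = {
    // function tool: described to the model by a parameter schema
    weather: {
      description: "Get the weather for a city",
      parameters: z.object({ city: z.string() }),
      execute: async ({ city }) => ({ city, tempC: 20 })
    },
    // provider-defined tool: resolved by the provider via its id
    computer: {
      type: "provider-defined",
      id: "anthropic.computer_20241022", // illustrative provider tool id
      args: { displayWidthPx: 1280, displayHeightPx: 800 },
      parameters: z.object({})
    }
  };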
@@ -3107,7 +3044,7 @@ function removeTextAfterLastWhitespace(text) {
  }

  // core/generate-text/parse-tool-call.ts
- var import_provider_utils8 = require("@ai-sdk/provider-utils");
+ var import_provider_utils7 = require("@ai-sdk/provider-utils");
  var import_ui_utils4 = require("@ai-sdk/ui-utils");
  function parseToolCall({
  toolCall,
@@ -3125,7 +3062,7 @@ function parseToolCall({
  });
  }
  const schema = (0, import_ui_utils4.asSchema)(tool2.parameters);
- const parseResult = toolCall.args.trim() === "" ? (0, import_provider_utils8.safeValidateTypes)({ value: {}, schema }) : (0, import_provider_utils8.safeParseJSON)({ text: toolCall.args, schema });
+ const parseResult = toolCall.args.trim() === "" ? (0, import_provider_utils7.safeValidateTypes)({ value: {}, schema }) : (0, import_provider_utils7.safeParseJSON)({ text: toolCall.args, schema });
  if (parseResult.success === false) {
  throw new InvalidToolArgumentsError({
  toolName,
@@ -3144,6 +3081,7 @@ function parseToolCall({
  // core/generate-text/to-response-messages.ts
  function toResponseMessages({
  text = "",
+ tools,
  toolCalls,
  toolResults
  }) {
@@ -3155,19 +3093,28 @@ function toResponseMessages({
  if (toolResults.length > 0) {
  responseMessages.push({
  role: "tool",
- content: toolResults.map((result) => ({
- type: "tool-result",
- toolCallId: result.toolCallId,
- toolName: result.toolName,
- result: result.result
- }))
+ content: toolResults.map((toolResult) => {
+ const tool2 = tools[toolResult.toolName];
+ return (tool2 == null ? void 0 : tool2.experimental_toToolResultContent) != null ? {
+ type: "tool-result",
+ toolCallId: toolResult.toolCallId,
+ toolName: toolResult.toolName,
+ result: tool2.experimental_toToolResultContent(toolResult.result),
+ content: tool2.experimental_toToolResultContent(toolResult.result)
+ } : {
+ type: "tool-result",
+ toolCallId: toolResult.toolCallId,
+ toolName: toolResult.toolName,
+ result: toolResult.result
+ };
+ })
  });
  }
  return responseMessages;
  }

  // core/generate-text/generate-text.ts
- var originalGenerateId3 = (0, import_provider_utils9.createIdGenerator)({ prefix: "aitxt-", size: 24 });
+ var originalGenerateId3 = (0, import_provider_utils8.createIdGenerator)({ prefix: "aitxt", size: 24 });
  async function generateText({
  model,
  tools,
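`toResponseMessages` now receives the `tools` record so a tool can rewrite its result for the model via `experimental_toToolResultContent`, for example returning an image part instead of raw JSON. A sketch assuming the `tool` helper exported by this package:

  import { tool } from "ai";
  import { z } from "zod";

  const screenshot = tool({
    description: "Take a screenshot",
    parameters: z.object({}),
    execute: async () => "iVBORw0KGgo...", // base64 image data (illustrative)
    // convert the raw result into multi-part tool-result content:
    experimental_toToolResultContent: (result) => [
      { type: "image", data: result, mimeType: "image/png" }
    ]
  });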
@@ -3206,6 +3153,7 @@ async function generateText({
  headers,
  settings: { ...settings, maxRetries }
  });
+ const initialPrompt = standardizePrompt({ system, prompt, messages });
  const tracer = getTracer(telemetry);
  return recordSpan({
  name: "ai.generateText",
@@ -3226,22 +3174,13 @@ async function generateText({
  }),
  tracer,
  fn: async (span) => {
- var _a11, _b, _c, _d, _e;
+ var _a11, _b, _c, _d, _e, _f, _g;
  const retry = retryWithExponentialBackoff({ maxRetries });
- const validatedPrompt = validatePrompt({
- system,
- prompt,
- messages
- });
  const mode = {
  type: "regular",
  ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
  };
  const callSettings = prepareCallSettings(settings);
- const promptMessages = await convertToLanguageModelPrompt({
- prompt: validatedPrompt,
- modelSupportsImageUrls: model.supportsImageUrls
- });
  let currentModelResponse;
  let currentToolCalls = [];
  let currentToolResults = [];
@@ -3256,7 +3195,19 @@ async function generateText({
  };
  let stepType = "initial";
  do {
- const currentInputFormat = stepCount === 0 ? validatedPrompt.type : "messages";
+ if (stepCount === 1) {
+ initialPrompt.type = "messages";
+ }
+ const promptFormat = stepCount === 0 ? initialPrompt.type : "messages";
+ const promptMessages = await convertToLanguageModelPrompt({
+ prompt: {
+ type: promptFormat,
+ system: initialPrompt.system,
+ messages: [...initialPrompt.messages, ...responseMessages]
+ },
+ modelSupportsImageUrls: model.supportsImageUrls,
+ modelSupportsUrl: model.supportsUrl
+ });
  currentModelResponse = await retry(
  () => recordSpan({
  name: "ai.generateText.doGenerate",
@@ -3268,7 +3219,7 @@ async function generateText({
  telemetry
  }),
  ...baseTelemetryAttributes,
- "ai.prompt.format": { input: () => currentInputFormat },
+ "ai.prompt.format": { input: () => promptFormat },
  "ai.prompt.messages": {
  input: () => JSON.stringify(promptMessages)
  },
@@ -3286,11 +3237,11 @@ async function generateText({
  }),
  tracer,
  fn: async (span2) => {
- var _a12, _b2, _c2, _d2, _e2, _f;
+ var _a12, _b2, _c2, _d2, _e2, _f2;
  const result = await model.doGenerate({
  mode,
  ...callSettings,
- inputFormat: currentInputFormat,
+ inputFormat: promptFormat,
  prompt: promptMessages,
  providerMetadata,
  abortSignal,
@@ -3299,7 +3250,7 @@ async function generateText({
  const responseData = {
  id: (_b2 = (_a12 = result.response) == null ? void 0 : _a12.id) != null ? _b2 : generateId3(),
  timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
- modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : model.modelId
+ modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : model.modelId
  };
  span2.setAttributes(
  selectTelemetryAttributes({
@@ -3369,7 +3320,27 @@ async function generateText({
  }
  const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace((_b = currentModelResponse.text) != null ? _b : "") : (_c = currentModelResponse.text) != null ? _c : "";
  text = nextStepType === "continue" || stepType === "continue" ? text + stepText : stepText;
- const currentStep = {
+ if (stepType === "continue") {
+ const lastMessage = responseMessages[responseMessages.length - 1];
+ if (typeof lastMessage.content === "string") {
+ lastMessage.content = text;
+ } else {
+ lastMessage.content.push({
+ text: stepText,
+ type: "text"
+ });
+ }
+ } else {
+ responseMessages.push(
+ ...toResponseMessages({
+ text,
+ tools: tools != null ? tools : {},
+ toolCalls: currentToolCalls,
+ toolResults: currentToolResults
+ })
+ );
+ }
+ const currentStepResult = {
  stepType,
  text: stepText,
  toolCalls: currentToolCalls,
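With this refactor, `generateText` rebuilds the model prompt on every step from `initialPrompt.messages` plus the accumulated `responseMessages` instead of maintaining a parallel `promptMessages` array, and each step result now carries `request` plus a `response.messages` snapshot. A caller-side sketch, assuming `model` and `tools` are defined elsewhere:

  import { generateText } from "ai";

  const result = await generateText({
    model,
    tools,
    maxSteps: 5,
    prompt: "What is the weather in Berlin?",
    onStepFinish(step) {
      // raw request body (when the provider exposes it) and the
      // response messages accumulated up to this step:
      console.log(step.request.body, step.response.messages);
    }
  });
  // assistant/tool messages for the whole run, e.g. for chat history:
  console.log(result.response.messages);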
@@ -3378,55 +3349,18 @@ async function generateText({
  usage: currentUsage,
  warnings: currentModelResponse.warnings,
  logprobs: currentModelResponse.logprobs,
+ request: (_d = currentModelResponse.request) != null ? _d : {},
  response: {
  ...currentModelResponse.response,
- headers: (_d = currentModelResponse.rawResponse) == null ? void 0 : _d.headers
+ headers: (_e = currentModelResponse.rawResponse) == null ? void 0 : _e.headers,
+ // deep clone msgs to avoid mutating past messages in multi-step:
+ messages: JSON.parse(JSON.stringify(responseMessages))
  },
  experimental_providerMetadata: currentModelResponse.providerMetadata,
  isContinued: nextStepType === "continue"
  };
- steps.push(currentStep);
- await (onStepFinish == null ? void 0 : onStepFinish(currentStep));
- if (stepType === "continue") {
- const lastResponseMessage = responseMessages.pop();
- promptMessages.pop();
- if (typeof lastResponseMessage.content === "string") {
- lastResponseMessage.content = text;
- } else {
- lastResponseMessage.content.push({
- text: stepText,
- type: "text"
- });
- }
- responseMessages.push(lastResponseMessage);
- promptMessages.push(
- convertToLanguageModelMessage(lastResponseMessage, null)
- );
- } else if (nextStepType === "continue") {
- const newResponseMessages = toResponseMessages({
- text,
- toolCalls: currentToolCalls,
- toolResults: currentToolResults
- });
- responseMessages.push(...newResponseMessages);
- promptMessages.push(
- ...newResponseMessages.map(
- (message) => convertToLanguageModelMessage(message, null)
- )
- );
- } else {
- const newResponseMessages = toResponseMessages({
- text: currentModelResponse.text,
- toolCalls: currentToolCalls,
- toolResults: currentToolResults
- });
- responseMessages.push(...newResponseMessages);
- promptMessages.push(
- ...newResponseMessages.map(
- (message) => convertToLanguageModelMessage(message, null)
- )
- );
- }
+ steps.push(currentStepResult);
+ await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
  stepType = nextStepType;
  } while (stepType !== "done");
  span.setAttributes(
@@ -3460,9 +3394,11 @@ async function generateText({
  finishReason: currentModelResponse.finishReason,
  usage,
  warnings: currentModelResponse.warnings,
+ request: (_f = currentModelResponse.request) != null ? _f : {},
  response: {
  ...currentModelResponse.response,
- headers: (_e = currentModelResponse.rawResponse) == null ? void 0 : _e.headers
+ headers: (_g = currentModelResponse.rawResponse) == null ? void 0 : _g.headers,
+ messages: responseMessages
  },
  logprobs: currentModelResponse.logprobs,
  responseMessages,
@@ -3540,6 +3476,7 @@ var DefaultGenerateTextResult = class {
  this.finishReason = options.finishReason;
  this.usage = options.usage;
  this.warnings = options.warnings;
+ this.request = options.request;
  this.response = options.response;
  this.responseMessages = options.responseMessages;
  this.roundtrips = options.steps;
@@ -3554,7 +3491,7 @@ var DefaultGenerateTextResult = class {
  var experimental_generateText = generateText;

  // core/generate-text/stream-text.ts
- var import_provider_utils10 = require("@ai-sdk/provider-utils");
+ var import_provider_utils9 = require("@ai-sdk/provider-utils");

  // core/util/create-stitchable-stream.ts
  function createStitchableStream() {
@@ -3897,7 +3834,7 @@ function runToolsTransformation({
  }

  // core/generate-text/stream-text.ts
- var originalGenerateId4 = (0, import_provider_utils10.createIdGenerator)({ prefix: "aitxt-", size: 24 });
+ var originalGenerateId4 = (0, import_provider_utils9.createIdGenerator)({ prefix: "aitxt", size: 24 });
  async function streamText({
  model,
  tools,
@@ -3939,6 +3876,7 @@ async function streamText({
  settings: { ...settings, maxRetries }
  });
  const tracer = getTracer(telemetry);
+ const initialPrompt = standardizePrompt({ system, prompt, messages });
  return recordSpan({
  name: "ai.streamText",
  attributes: selectTelemetryAttributes({
@@ -3958,11 +3896,20 @@ async function streamText({
  fn: async (rootSpan) => {
  const retry = retryWithExponentialBackoff({ maxRetries });
  const startStep = async ({
- promptMessages: promptMessages2,
- promptType
+ responseMessages
  }) => {
+ const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
+ const promptMessages = await convertToLanguageModelPrompt({
+ prompt: {
+ type: promptFormat,
+ system: initialPrompt.system,
+ messages: [...initialPrompt.messages, ...responseMessages]
+ },
+ modelSupportsImageUrls: model.supportsImageUrls,
+ modelSupportsUrl: model.supportsUrl
+ });
  const {
- result: { stream: stream2, warnings: warnings2, rawResponse: rawResponse2 },
+ result: { stream: stream2, warnings: warnings2, rawResponse: rawResponse2, request: request2 },
  doStreamSpan: doStreamSpan2,
  startTimestampMs: startTimestampMs2
  } = await retry(
@@ -3977,10 +3924,10 @@ async function streamText({
  }),
  ...baseTelemetryAttributes,
  "ai.prompt.format": {
- input: () => promptType
+ input: () => promptFormat
  },
  "ai.prompt.messages": {
- input: () => JSON.stringify(promptMessages2)
+ input: () => JSON.stringify(promptMessages)
  },
  // standardized gen-ai llm span attributes:
  "gen_ai.system": model.provider,
@@ -4010,8 +3957,8 @@ async function streamText({
  })
  },
  ...prepareCallSettings(settings),
- inputFormat: promptType,
- prompt: promptMessages2,
+ inputFormat: promptFormat,
+ prompt: promptMessages,
  providerMetadata,
  abortSignal,
  headers
@@ -4030,28 +3977,23 @@ async function streamText({
  abortSignal
  }),
  warnings: warnings2,
+ request: request2 != null ? request2 : {},
  rawResponse: rawResponse2
  },
  doStreamSpan: doStreamSpan2,
  startTimestampMs: startTimestampMs2
  };
  };
- const promptMessages = await convertToLanguageModelPrompt({
- prompt: validatePrompt({ system, prompt, messages }),
- modelSupportsImageUrls: model.supportsImageUrls
- });
  const {
- result: { stream, warnings, rawResponse },
+ result: { stream, warnings, rawResponse, request },
  doStreamSpan,
  startTimestampMs
- } = await startStep({
- promptType: validatePrompt({ system, prompt, messages }).type,
- promptMessages
- });
+ } = await startStep({ responseMessages: [] });
  return new DefaultStreamTextResult({
  stream,
  warnings,
  rawResponse,
+ request,
  onChunk,
  onFinish,
  onStepFinish,
@@ -4062,11 +4004,11 @@ async function streamText({
  maxSteps,
  continueSteps,
  startStep,
- promptMessages,
  modelId: model.modelId,
  now: now2,
  currentDate,
- generateId: generateId3
+ generateId: generateId3,
+ tools
  });
  }
  });
@@ -4076,6 +4018,7 @@ var DefaultStreamTextResult = class {
  stream,
  warnings,
  rawResponse,
+ request,
  onChunk,
  onFinish,
  onStepFinish,
@@ -4086,11 +4029,11 @@ var DefaultStreamTextResult = class {
  maxSteps,
  continueSteps,
  startStep,
- promptMessages,
  modelId,
  now: now2,
  currentDate,
- generateId: generateId3
+ generateId: generateId3,
+ tools
  }) {
  this.warnings = warnings;
  this.rawResponse = rawResponse;
@@ -4111,6 +4054,8 @@ var DefaultStreamTextResult = class {
  promise: providerMetadataPromise
  } = createResolvablePromise();
  this.experimental_providerMetadata = providerMetadataPromise;
+ const { resolve: resolveRequest, promise: requestPromise } = createResolvablePromise();
+ this.request = requestPromise;
  const { resolve: resolveResponse, promise: responsePromise } = createResolvablePromise();
  this.response = responsePromise;
  const {
@@ -4131,14 +4076,15 @@ var DefaultStreamTextResult = class {
  startTimestamp,
  doStreamSpan: doStreamSpan2,
  currentStep,
- promptMessages: promptMessages2,
+ responseMessages,
  usage = {
  promptTokens: 0,
  completionTokens: 0,
  totalTokens: 0
  },
  stepType,
- previousStepText = ""
+ previousStepText = "",
+ stepRequest
  }) {
  const stepToolCalls = [];
  const stepToolResults = [];
@@ -4265,6 +4211,7 @@ var DefaultStreamTextResult = class {
  },
  // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
  async flush(controller) {
+ var _a11;
  const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
  let nextStepType = "done";
  if (currentStep + 1 < maxSteps) {
@@ -4330,10 +4277,32 @@ var DefaultStreamTextResult = class {
  usage: stepUsage,
  experimental_providerMetadata: stepProviderMetadata,
  logprobs: stepLogProbs,
- response: stepResponse,
+ response: {
+ ...stepResponse
+ },
  isContinued: nextStepType === "continue"
  });
- const stepResult = {
+ if (stepType === "continue") {
+ const lastMessage = responseMessages[responseMessages.length - 1];
+ if (typeof lastMessage.content === "string") {
+ lastMessage.content = stepText;
+ } else {
+ lastMessage.content.push({
+ text: stepText,
+ type: "text"
+ });
+ }
+ } else {
+ responseMessages.push(
+ ...toResponseMessages({
+ text: stepText,
+ tools: tools != null ? tools : {},
+ toolCalls: stepToolCalls,
+ toolResults: stepToolResults
+ })
+ );
+ }
+ const currentStepResult = {
  stepType,
  text: stepText,
  toolCalls: stepToolCalls,
@@ -4342,44 +4311,30 @@ var DefaultStreamTextResult = class {
  usage: stepUsage,
  warnings: self.warnings,
  logprobs: stepLogProbs,
- response: stepResponse,
+ request: stepRequest,
  rawResponse: self.rawResponse,
+ response: {
+ ...stepResponse,
+ headers: (_a11 = self.rawResponse) == null ? void 0 : _a11.headers,
+ // deep clone msgs to avoid mutating past messages in multi-step:
+ messages: JSON.parse(JSON.stringify(responseMessages))
+ },
  experimental_providerMetadata: stepProviderMetadata,
  isContinued: nextStepType === "continue"
  };
- stepResults.push(stepResult);
- await (onStepFinish == null ? void 0 : onStepFinish(stepResult));
+ stepResults.push(currentStepResult);
+ await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
  const combinedUsage = {
  promptTokens: usage.promptTokens + stepUsage.promptTokens,
  completionTokens: usage.completionTokens + stepUsage.completionTokens,
  totalTokens: usage.totalTokens + stepUsage.totalTokens
  };
  if (nextStepType !== "done") {
- if (stepType === "continue") {
- const lastPromptMessage = promptMessages2[promptMessages2.length - 1];
- lastPromptMessage.content.push({
- text: stepText,
- type: "text"
- });
- } else {
- promptMessages2.push(
- ...toResponseMessages({
- text: stepText,
- toolCalls: stepToolCalls,
- toolResults: stepToolResults
- }).map(
- (message) => convertToLanguageModelMessage(message, null)
- )
- );
- }
  const {
  result,
  doStreamSpan: doStreamSpan3,
  startTimestampMs: startTimestamp2
- } = await startStep({
- promptType: "messages",
- promptMessages: promptMessages2
- });
+ } = await startStep({ responseMessages });
  self.warnings = result.warnings;
  self.rawResponse = result.rawResponse;
  addStepStream({
@@ -4387,10 +4342,11 @@ var DefaultStreamTextResult = class {
  startTimestamp: startTimestamp2,
  doStreamSpan: doStreamSpan3,
  currentStep: currentStep + 1,
- promptMessages: promptMessages2,
+ responseMessages,
  usage: combinedUsage,
  stepType: nextStepType,
- previousStepText: fullStepText
+ previousStepText: fullStepText,
+ stepRequest: result.request
  });
  return;
  }
@@ -4401,7 +4357,9 @@ var DefaultStreamTextResult = class {
  usage: combinedUsage,
  experimental_providerMetadata: stepProviderMetadata,
  logprobs: stepLogProbs,
- response: stepResponse
+ response: {
+ ...stepResponse
+ }
  });
  closeStitchableStream();
  rootSpan.setAttributes(
@@ -4424,34 +4382,13 @@ var DefaultStreamTextResult = class {
  }
  })
  );
- const responseMessages = stepResults.reduce((responseMessages2, step) => {
- if (step.stepType === "continue") {
- const lastResponseMessage = responseMessages2.pop();
- if (typeof lastResponseMessage.content === "string") {
- lastResponseMessage.content += step.text;
- } else {
- lastResponseMessage.content.push({
- text: step.text,
- type: "text"
- });
- }
- return [...responseMessages2, lastResponseMessage];
- }
- return [
- ...responseMessages2,
- ...toResponseMessages({
- text: step.text,
- toolCalls: step.toolCalls,
- toolResults: step.toolResults
- })
- ];
- }, []);
  resolveUsage(combinedUsage);
  resolveFinishReason(stepFinishReason);
  resolveText(fullStepText);
  resolveToolCalls(stepToolCalls);
  resolveProviderMetadata(stepProviderMetadata);
  resolveToolResults(stepToolResults);
+ resolveRequest(stepRequest);
  resolveResponse({
  ...stepResponse,
  headers: rawResponse == null ? void 0 : rawResponse.headers
@@ -4469,10 +4406,12 @@ var DefaultStreamTextResult = class {
  // optional as well. Therefore we need to cast the toolResults to any.
  // The type exposed to the users will be correctly inferred.
  toolResults: stepToolResults,
+ request: stepRequest,
  rawResponse,
  response: {
  ...stepResponse,
- headers: rawResponse == null ? void 0 : rawResponse.headers
+ headers: rawResponse == null ? void 0 : rawResponse.headers,
+ messages: responseMessages
  },
  warnings,
  experimental_providerMetadata: stepProviderMetadata,
@@ -4494,9 +4433,10 @@ var DefaultStreamTextResult = class {
  startTimestamp: startTimestampMs,
  doStreamSpan,
  currentStep: 0,
- promptMessages,
+ responseMessages: [],
  usage: void 0,
- stepType: "initial"
+ stepType: "initial",
+ stepRequest: request
  });
  }
  /**
@@ -4535,7 +4475,7 @@ var DefaultStreamTextResult = class {
  }
  toDataStreamInternal({
  callbacks = {},
- getErrorMessage: getErrorMessage4 = () => "",
+ getErrorMessage: getErrorMessage3 = () => "",
  // mask error messages for safety by default
  sendUsage = true
  } = {}) {
@@ -4610,7 +4550,7 @@ var DefaultStreamTextResult = class {
  }
  case "error": {
  controller.enqueue(
- (0, import_ui_utils10.formatStreamPart)("error", getErrorMessage4(chunk.error))
+ (0, import_ui_utils10.formatStreamPart)("error", getErrorMessage3(chunk.error))
  );
  break;
  }
@@ -4658,7 +4598,7 @@ var DefaultStreamTextResult = class {
  statusText: "statusText" in options ? options.statusText : void 0
  };
  const data = options == null ? void 0 : "data" in options ? options.data : void 0;
- const getErrorMessage4 = options == null ? void 0 : "getErrorMessage" in options ? options.getErrorMessage : void 0;
+ const getErrorMessage3 = options == null ? void 0 : "getErrorMessage" in options ? options.getErrorMessage : void 0;
  const sendUsage = options == null ? void 0 : "sendUsage" in options ? options.sendUsage : void 0;
  writeToServerResponse({
  response,
@@ -4668,7 +4608,7 @@ var DefaultStreamTextResult = class {
  contentType: "text/plain; charset=utf-8",
  dataStreamVersion: "v1"
  }),
- stream: this.toDataStream({ data, getErrorMessage: getErrorMessage4, sendUsage })
+ stream: this.toDataStream({ data, getErrorMessage: getErrorMessage3, sendUsage })
  });
  }
  pipeTextStreamToResponse(response, init) {
@@ -4700,10 +4640,10 @@ var DefaultStreamTextResult = class {
  statusText: "statusText" in options ? options.statusText : void 0
  };
  const data = options == null ? void 0 : "data" in options ? options.data : void 0;
- const getErrorMessage4 = options == null ? void 0 : "getErrorMessage" in options ? options.getErrorMessage : void 0;
+ const getErrorMessage3 = options == null ? void 0 : "getErrorMessage" in options ? options.getErrorMessage : void 0;
  const sendUsage = options == null ? void 0 : "sendUsage" in options ? options.sendUsage : void 0;
  return new Response(
- this.toDataStream({ data, getErrorMessage: getErrorMessage4, sendUsage }),
+ this.toDataStream({ data, getErrorMessage: getErrorMessage3, sendUsage }),
  {
  status: (_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200,
  statusText: init == null ? void 0 : init.statusText,
@@ -4745,6 +4685,7 @@ var experimental_wrapLanguageModel = ({
  modelId: modelId != null ? modelId : model.modelId,
  defaultObjectGenerationMode: model.defaultObjectGenerationMode,
  supportsImageUrls: model.supportsImageUrls,
+ supportsUrl: model.supportsUrl,
  supportsStructuredOutputs: model.supportsStructuredOutputs,
  async doGenerate(params) {
  const transformedParams = await doTransform({ params, type: "generate" });
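`experimental_wrapLanguageModel` now forwards `supportsUrl`, so wrapped models keep the URL-handling behavior introduced by the prompt-conversion changes above. A usage sketch with a pass-through middleware, where `baseModel` and the middleware body are illustrative:

  import { experimental_wrapLanguageModel } from "ai";

  const wrapped = experimental_wrapLanguageModel({
    model: baseModel,
    middleware: {
      transformParams: async ({ params }) => params // no-op middleware
    }
  });
  // wrapped.supportsUrl === baseModel.supportsUrl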
@@ -6170,8 +6111,8 @@ var StreamingTextResponse = class extends Response {
  };

  // streams/index.ts
- var generateId2 = import_provider_utils11.generateId;
- var nanoid = import_provider_utils11.generateId;
+ var generateId2 = import_provider_utils10.generateId;
+ var nanoid = import_provider_utils10.generateId;
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
  AISDKError,