ai 3.4.17 → 3.4.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -593,12 +593,6 @@ var DefaultEmbedManyResult = class {
  // core/generate-object/generate-object.ts
  import { createIdGenerator, safeParseJSON } from "@ai-sdk/provider-utils";
 
- // core/prompt/convert-to-language-model-prompt.ts
- import {
-   convertUint8ArrayToBase64 as convertUint8ArrayToBase642,
-   getErrorMessage as getErrorMessage2
- } from "@ai-sdk/provider-utils";
-
  // util/download-error.ts
  import { AISDKError as AISDKError2 } from "@ai-sdk/provider";
  var name2 = "AI_DownloadError";
@@ -844,36 +838,21 @@ function splitDataUrl(dataUrl) {
  async function convertToLanguageModelPrompt({
    prompt,
    modelSupportsImageUrls = true,
+   modelSupportsUrl = () => false,
    downloadImplementation = download
  }) {
-   const languageModelMessages = [];
-   if (prompt.system != null) {
-     languageModelMessages.push({ role: "system", content: prompt.system });
-   }
-   const downloadedAssets = modelSupportsImageUrls || prompt.messages == null ? null : await downloadAssets(prompt.messages, downloadImplementation);
-   const promptType = prompt.type;
-   switch (promptType) {
-     case "prompt": {
-       languageModelMessages.push({
-         role: "user",
-         content: [{ type: "text", text: prompt.prompt }]
-       });
-       break;
-     }
-     case "messages": {
-       languageModelMessages.push(
-         ...prompt.messages.map(
-           (message) => convertToLanguageModelMessage(message, downloadedAssets)
-         )
-       );
-       break;
-     }
-     default: {
-       const _exhaustiveCheck = promptType;
-       throw new Error(`Unsupported prompt type: ${_exhaustiveCheck}`);
-     }
-   }
-   return languageModelMessages;
+   const downloadedAssets = await downloadAssets(
+     prompt.messages,
+     downloadImplementation,
+     modelSupportsImageUrls,
+     modelSupportsUrl
+   );
+   return [
+     ...prompt.system != null ? [{ role: "system", content: prompt.system }] : [],
+     ...prompt.messages.map(
+       (message) => convertToLanguageModelMessage(message, downloadedAssets)
+     )
+   ];
  }
  function convertToLanguageModelMessage(message, downloadedAssets) {
    const role = message.role;
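Note: `convertToLanguageModelPrompt` now takes a `modelSupportsUrl` predicate (defaulting to `() => false`), so a provider can keep specific URLs intact instead of having the SDK download them into the prompt. A minimal sketch of the provider side, assuming a hand-rolled `LanguageModelV1`-style object (`baseModel` is hypothetical):

    const model = {
      ...baseModel,
      // URLs this returns true for are passed through to the provider
      // instead of being fetched by downloadAssets:
      supportsUrl(url) {
        return url.protocol === "https:";
      }
    };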
@@ -895,178 +874,7 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
      }
      return {
        role: "user",
-       content: message.content.map(
-         (part) => {
-           var _a11, _b, _c, _d, _e;
-           switch (part.type) {
-             case "text": {
-               return {
-                 type: "text",
-                 text: part.text,
-                 providerMetadata: part.experimental_providerMetadata
-               };
-             }
-             case "image": {
-               if (part.image instanceof URL) {
-                 if (downloadedAssets == null) {
-                   return {
-                     type: "image",
-                     image: part.image,
-                     mimeType: part.mimeType,
-                     providerMetadata: part.experimental_providerMetadata
-                   };
-                 } else {
-                   const downloadedImage = downloadedAssets[part.image.toString()];
-                   return {
-                     type: "image",
-                     image: downloadedImage.data,
-                     mimeType: (_a11 = part.mimeType) != null ? _a11 : downloadedImage.mimeType,
-                     providerMetadata: part.experimental_providerMetadata
-                   };
-                 }
-               }
-               if (typeof part.image === "string") {
-                 try {
-                   const url = new URL(part.image);
-                   switch (url.protocol) {
-                     case "http:":
-                     case "https:": {
-                       if (downloadedAssets == null) {
-                         return {
-                           type: "image",
-                           image: url,
-                           mimeType: part.mimeType,
-                           providerMetadata: part.experimental_providerMetadata
-                         };
-                       } else {
-                         const downloadedImage = downloadedAssets[url.toString()];
-                         return {
-                           type: "image",
-                           image: downloadedImage.data,
-                           mimeType: (_b = part.mimeType) != null ? _b : downloadedImage.mimeType,
-                           providerMetadata: part.experimental_providerMetadata
-                         };
-                       }
-                     }
-                     case "data:": {
-                       try {
-                         const { mimeType, base64Content } = splitDataUrl(
-                           part.image
-                         );
-                         if (mimeType == null || base64Content == null) {
-                           throw new Error("Invalid data URL format");
-                         }
-                         return {
-                           type: "image",
-                           image: convertDataContentToUint8Array(base64Content),
-                           mimeType,
-                           providerMetadata: part.experimental_providerMetadata
-                         };
-                       } catch (error) {
-                         throw new Error(
-                           `Error processing data URL: ${getErrorMessage2(
-                             message
-                           )}`
-                         );
-                       }
-                     }
-                   }
-                 } catch (_ignored) {
-                 }
-               }
-               const imageUint8 = convertDataContentToUint8Array(part.image);
-               return {
-                 type: "image",
-                 image: imageUint8,
-                 mimeType: (_c = part.mimeType) != null ? _c : detectImageMimeType(imageUint8),
-                 providerMetadata: part.experimental_providerMetadata
-               };
-             }
-             case "file": {
-               if (part.data instanceof URL) {
-                 if (downloadedAssets == null) {
-                   return {
-                     type: "file",
-                     data: part.data,
-                     mimeType: part.mimeType,
-                     providerMetadata: part.experimental_providerMetadata
-                   };
-                 } else {
-                   const downloadedImage = downloadedAssets[part.data.toString()];
-                   return {
-                     type: "file",
-                     data: convertUint8ArrayToBase642(downloadedImage.data),
-                     mimeType: (_d = part.mimeType) != null ? _d : downloadedImage.mimeType,
-                     providerMetadata: part.experimental_providerMetadata
-                   };
-                 }
-               }
-               if (typeof part.data === "string") {
-                 try {
-                   const url = new URL(part.data);
-                   switch (url.protocol) {
-                     case "http:":
-                     case "https:": {
-                       if (downloadedAssets == null) {
-                         return {
-                           type: "file",
-                           data: url,
-                           mimeType: part.mimeType,
-                           providerMetadata: part.experimental_providerMetadata
-                         };
-                       } else {
-                         const downloadedImage = downloadedAssets[url.toString()];
-                         return {
-                           type: "file",
-                           data: convertUint8ArrayToBase642(
-                             downloadedImage.data
-                           ),
-                           mimeType: (_e = part.mimeType) != null ? _e : downloadedImage.mimeType,
-                           providerMetadata: part.experimental_providerMetadata
-                         };
-                       }
-                     }
-                     case "data:": {
-                       try {
-                         const { mimeType, base64Content } = splitDataUrl(
-                           part.data
-                         );
-                         if (mimeType == null || base64Content == null) {
-                           throw new Error("Invalid data URL format");
-                         }
-                         return {
-                           type: "file",
-                           data: convertDataContentToBase64String(
-                             base64Content
-                           ),
-                           mimeType,
-                           providerMetadata: part.experimental_providerMetadata
-                         };
-                       } catch (error) {
-                         throw new Error(
-                           `Error processing data URL: ${getErrorMessage2(
-                             message
-                           )}`
-                         );
-                       }
-                     }
-                   }
-                 } catch (_ignored) {
-                 }
-               }
-               const imageBase64 = convertDataContentToBase64String(
-                 part.data
-               );
-               return {
-                 type: "file",
-                 data: imageBase64,
-                 mimeType: part.mimeType,
-                 providerMetadata: part.experimental_providerMetadata
-               };
-             }
-           }
-         }
-       ).filter((part) => part.type !== "text" || part.text !== ""),
+       content: message.content.map((part) => convertPartToLanguageModelPart(part, downloadedAssets)).filter((part) => part.type !== "text" || part.text !== ""),
        providerMetadata: message.experimental_providerMetadata
      };
    }
@@ -1101,6 +909,8 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
          toolCallId: part.toolCallId,
          toolName: part.toolName,
          result: part.result,
+         content: part.experimental_content,
+         isError: part.isError,
          providerMetadata: part.experimental_providerMetadata
        })),
        providerMetadata: message.experimental_providerMetadata
@@ -1112,17 +922,19 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
      }
    }
  }
- async function downloadAssets(messages, downloadImplementation) {
+ async function downloadAssets(messages, downloadImplementation, modelSupportsImageUrls, modelSupportsUrl) {
    const urls = messages.filter((message) => message.role === "user").map((message) => message.content).filter(
      (content) => Array.isArray(content)
    ).flat().filter(
      (part) => part.type === "image" || part.type === "file"
+   ).filter(
+     (part) => !(part.type === "image" && modelSupportsImageUrls === true)
    ).map((part) => part.type === "image" ? part.image : part.data).map(
      (part) => (
        // support string urls:
        typeof part === "string" && (part.startsWith("http:") || part.startsWith("https:")) ? new URL(part) : part
      )
-   ).filter((image) => image instanceof URL);
+   ).filter((image) => image instanceof URL).filter((url) => !modelSupportsUrl(url));
    const downloadedImages = await Promise.all(
      urls.map(async (url) => ({
        url,
@@ -1133,6 +945,79 @@ async function downloadAssets(messages, downloadImplementation) {
      downloadedImages.map(({ url, data }) => [url.toString(), data])
    );
  }
+ function convertPartToLanguageModelPart(part, downloadedAssets) {
+   if (part.type === "text") {
+     return {
+       type: "text",
+       text: part.text,
+       providerMetadata: part.experimental_providerMetadata
+     };
+   }
+   let mimeType = part.mimeType;
+   let data;
+   let content;
+   let normalizedData;
+   const type = part.type;
+   switch (type) {
+     case "image":
+       data = part.image;
+       break;
+     case "file":
+       data = part.data;
+       break;
+     default:
+       throw new Error(`Unsupported part type: ${type}`);
+   }
+   try {
+     content = typeof data === "string" ? new URL(data) : data;
+   } catch (error) {
+     content = data;
+   }
+   if (content instanceof URL) {
+     if (content.protocol === "data:") {
+       const { mimeType: dataUrlMimeType, base64Content } = splitDataUrl(
+         content.toString()
+       );
+       if (dataUrlMimeType == null || base64Content == null) {
+         throw new Error(`Invalid data URL format in part ${type}`);
+       }
+       mimeType = dataUrlMimeType;
+       normalizedData = convertDataContentToUint8Array(base64Content);
+     } else {
+       const downloadedFile = downloadedAssets[content.toString()];
+       if (downloadedFile) {
+         normalizedData = downloadedFile.data;
+         mimeType != null ? mimeType : mimeType = downloadedFile.mimeType;
+       } else {
+         normalizedData = content;
+       }
+     }
+   } else {
+     normalizedData = convertDataContentToUint8Array(content);
+   }
+   switch (type) {
+     case "image":
+       if (mimeType == null && normalizedData instanceof Uint8Array) {
+         mimeType = detectImageMimeType(normalizedData);
+       }
+       return {
+         type: "image",
+         image: normalizedData,
+         mimeType,
+         providerMetadata: part.experimental_providerMetadata
+       };
+     case "file":
+       if (mimeType == null) {
+         throw new Error(`Mime type is missing for file part`);
+       }
+       return {
+         type: "file",
+         data: normalizedData instanceof Uint8Array ? convertDataContentToBase64String(normalizedData) : normalizedData,
+         mimeType,
+         providerMetadata: part.experimental_providerMetadata
+       };
+   }
+ }
 
  // errors/invalid-argument-error.ts
  import { AISDKError as AISDKError5 } from "@ai-sdk/provider";
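Note: the duplicated per-part image/file branches removed above are consolidated into the new `convertPartToLanguageModelPart` helper, which normalizes a part in a fixed order: a `data:` URL is decoded inline, a downloaded asset (keyed by URL in `downloadedAssets`) is substituted when available, and any remaining URL is passed through untouched. A rough illustration of the two main paths (hypothetical inputs, abbreviated results):

    // data: URL -> bytes plus mime type taken from the URL itself
    convertPartToLanguageModelPart(
      { type: "image", image: "data:image/png;base64,iVBORw0KGgo=" },
      {}
    );
    // ~> { type: "image", image: Uint8Array, mimeType: "image/png", ... }

    // http(s) URL with no downloaded asset -> the URL is passed through
    convertPartToLanguageModelPart(
      { type: "image", image: "https://example.com/cat.png" },
      {}
    );
    // ~> { type: "image", image: URL, mimeType: undefined, ... }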
@@ -1286,13 +1171,13 @@ function prepareCallSettings({
    };
  }
 
- // core/prompt/validate-prompt.ts
+ // core/prompt/standardize-prompt.ts
  import { InvalidPromptError } from "@ai-sdk/provider";
  import { safeValidateTypes } from "@ai-sdk/provider-utils";
- import { z as z6 } from "zod";
+ import { z as z7 } from "zod";
 
  // core/prompt/message.ts
- import { z as z5 } from "zod";
+ import { z as z6 } from "zod";
 
  // core/types/provider-metadata.ts
  import { z as z3 } from "zod";
@@ -1317,75 +1202,91 @@ var providerMetadataSchema = z3.record(
  );
 
  // core/prompt/content-part.ts
+ import { z as z5 } from "zod";
+
+ // core/prompt/tool-result-content.ts
  import { z as z4 } from "zod";
- var textPartSchema = z4.object({
-   type: z4.literal("text"),
-   text: z4.string(),
+ var toolResultContentSchema = z4.array(
+   z4.union([
+     z4.object({ type: z4.literal("text"), text: z4.string() }),
+     z4.object({
+       type: z4.literal("image"),
+       data: z4.string(),
+       mimeType: z4.string().optional()
+     })
+   ])
+ );
+
+ // core/prompt/content-part.ts
+ var textPartSchema = z5.object({
+   type: z5.literal("text"),
+   text: z5.string(),
    experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var imagePartSchema = z4.object({
-   type: z4.literal("image"),
-   image: z4.union([dataContentSchema, z4.instanceof(URL)]),
-   mimeType: z4.string().optional(),
+ var imagePartSchema = z5.object({
+   type: z5.literal("image"),
+   image: z5.union([dataContentSchema, z5.instanceof(URL)]),
+   mimeType: z5.string().optional(),
    experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var filePartSchema = z4.object({
-   type: z4.literal("file"),
-   data: z4.union([dataContentSchema, z4.instanceof(URL)]),
-   mimeType: z4.string(),
+ var filePartSchema = z5.object({
+   type: z5.literal("file"),
+   data: z5.union([dataContentSchema, z5.instanceof(URL)]),
+   mimeType: z5.string(),
    experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var toolCallPartSchema = z4.object({
-   type: z4.literal("tool-call"),
-   toolCallId: z4.string(),
-   toolName: z4.string(),
-   args: z4.unknown()
+ var toolCallPartSchema = z5.object({
+   type: z5.literal("tool-call"),
+   toolCallId: z5.string(),
+   toolName: z5.string(),
+   args: z5.unknown()
  });
- var toolResultPartSchema = z4.object({
-   type: z4.literal("tool-result"),
-   toolCallId: z4.string(),
-   toolName: z4.string(),
-   result: z4.unknown(),
-   isError: z4.boolean().optional(),
+ var toolResultPartSchema = z5.object({
+   type: z5.literal("tool-result"),
+   toolCallId: z5.string(),
+   toolName: z5.string(),
+   result: z5.unknown(),
+   content: toolResultContentSchema.optional(),
+   isError: z5.boolean().optional(),
    experimental_providerMetadata: providerMetadataSchema.optional()
  });
 
  // core/prompt/message.ts
- var coreSystemMessageSchema = z5.object({
-   role: z5.literal("system"),
-   content: z5.string(),
+ var coreSystemMessageSchema = z6.object({
+   role: z6.literal("system"),
+   content: z6.string(),
    experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var coreUserMessageSchema = z5.object({
-   role: z5.literal("user"),
-   content: z5.union([
-     z5.string(),
-     z5.array(z5.union([textPartSchema, imagePartSchema, filePartSchema]))
+ var coreUserMessageSchema = z6.object({
+   role: z6.literal("user"),
+   content: z6.union([
+     z6.string(),
+     z6.array(z6.union([textPartSchema, imagePartSchema, filePartSchema]))
    ]),
    experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var coreAssistantMessageSchema = z5.object({
-   role: z5.literal("assistant"),
-   content: z5.union([
-     z5.string(),
-     z5.array(z5.union([textPartSchema, toolCallPartSchema]))
+ var coreAssistantMessageSchema = z6.object({
+   role: z6.literal("assistant"),
+   content: z6.union([
+     z6.string(),
+     z6.array(z6.union([textPartSchema, toolCallPartSchema]))
    ]),
    experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var coreToolMessageSchema = z5.object({
-   role: z5.literal("tool"),
-   content: z5.array(toolResultPartSchema),
+ var coreToolMessageSchema = z6.object({
+   role: z6.literal("tool"),
+   content: z6.array(toolResultPartSchema),
    experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var coreMessageSchema = z5.union([
+ var coreMessageSchema = z6.union([
    coreSystemMessageSchema,
    coreUserMessageSchema,
    coreAssistantMessageSchema,
    coreToolMessageSchema
  ]);
 
- // core/prompt/validate-prompt.ts
- function validatePrompt(prompt) {
+ // core/prompt/standardize-prompt.ts
+ function standardizePrompt(prompt) {
    if (prompt.prompt == null && prompt.messages == null) {
      throw new InvalidPromptError({
        prompt,
@@ -1413,15 +1314,19 @@ function validatePrompt(prompt) {
    }
    return {
      type: "prompt",
-     prompt: prompt.prompt,
-     messages: void 0,
-     system: prompt.system
+     system: prompt.system,
+     messages: [
+       {
+         role: "user",
+         content: prompt.prompt
+       }
+     ]
    };
  }
  if (prompt.messages != null) {
    const validationResult = safeValidateTypes({
      value: prompt.messages,
-     schema: z6.array(coreMessageSchema)
+     schema: z7.array(coreMessageSchema)
    });
    if (!validationResult.success) {
      throw new InvalidPromptError({
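Note: `validatePrompt` is renamed to `standardizePrompt`, and a string `prompt` is now normalized into a user message up front, so everything downstream works with a `messages` array. Roughly (illustrative input/output):

    standardizePrompt({ system: "Be terse.", prompt: "What is 2 + 2?" });
    // => {
    //   type: "prompt",
    //   system: "Be terse.",
    //   messages: [{ role: "user", content: "What is 2 + 2?" }]
    // }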
@@ -1432,7 +1337,6 @@ function validatePrompt(prompt) {
    }
    return {
      type: "messages",
-     prompt: void 0,
      messages: prompt.messages,
      // only possible case bc of checks above
      system: prompt.system
@@ -1887,7 +1791,7 @@ function validateObjectGenerationInput({
  }
 
  // core/generate-object/generate-object.ts
- var originalGenerateId = createIdGenerator({ prefix: "aiobj-", size: 24 });
+ var originalGenerateId = createIdGenerator({ prefix: "aiobj", size: 24 });
  async function generateObject({
    model,
    enum: enumValues,
@@ -1957,6 +1861,7 @@ async function generateObject({
      }),
      tracer,
      fn: async (span) => {
+       var _a11, _b;
        const retry = retryWithExponentialBackoff({ maxRetries });
        if (mode === "auto" || mode == null) {
          mode = model.defaultObjectGenerationMode;
@@ -1967,11 +1872,12 @@ async function generateObject({
        let warnings;
        let rawResponse;
        let response;
+       let request;
        let logprobs;
        let resultProviderMetadata;
        switch (mode) {
          case "json": {
-           const validatedPrompt = validatePrompt({
+           const standardPrompt = standardizePrompt({
              system: outputStrategy.jsonSchema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs ? system : injectJsonInstruction({
                prompt: system,
                schema: outputStrategy.jsonSchema
@@ -1980,10 +1886,10 @@ async function generateObject({
              messages
            });
            const promptMessages = await convertToLanguageModelPrompt({
-             prompt: validatedPrompt,
-             modelSupportsImageUrls: model.supportsImageUrls
+             prompt: standardPrompt,
+             modelSupportsImageUrls: model.supportsImageUrls,
+             modelSupportsUrl: model.supportsUrl
            });
-           const inputFormat = validatedPrompt.type;
            const generateResult = await retry(
              () => recordSpan({
                name: "ai.generateObject.doGenerate",
@@ -1996,7 +1902,7 @@ async function generateObject({
                }),
                ...baseTelemetryAttributes,
                "ai.prompt.format": {
-                 input: () => inputFormat
+                 input: () => standardPrompt.type
                },
                "ai.prompt.messages": {
                  input: () => JSON.stringify(promptMessages)
@@ -2015,7 +1921,7 @@ async function generateObject({
                }),
                tracer,
                fn: async (span2) => {
-                 var _a11, _b, _c, _d, _e, _f;
+                 var _a12, _b2, _c, _d, _e, _f;
                  const result2 = await model.doGenerate({
                    mode: {
                      type: "object-json",
@@ -2024,7 +1930,7 @@ async function generateObject({
                      description: schemaDescription
                    },
                    ...prepareCallSettings(settings),
-                   inputFormat,
+                   inputFormat: standardPrompt.type,
                    prompt: promptMessages,
                    providerMetadata,
                    abortSignal,
@@ -2034,7 +1940,7 @@ async function generateObject({
                    throw new NoObjectGeneratedError();
                  }
                  const responseData = {
-                   id: (_b = (_a11 = result2.response) == null ? void 0 : _a11.id) != null ? _b : generateId3(),
+                   id: (_b2 = (_a12 = result2.response) == null ? void 0 : _a12.id) != null ? _b2 : generateId3(),
                    timestamp: (_d = (_c = result2.response) == null ? void 0 : _c.timestamp) != null ? _d : currentDate(),
                    modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId
                  };
@@ -2072,18 +1978,20 @@ async function generateObject({
            rawResponse = generateResult.rawResponse;
            logprobs = generateResult.logprobs;
            resultProviderMetadata = generateResult.providerMetadata;
+           request = (_a11 = generateResult.request) != null ? _a11 : {};
            response = generateResult.responseData;
            break;
          }
          case "tool": {
-           const validatedPrompt = validatePrompt({
+           const validatedPrompt = standardizePrompt({
              system,
              prompt,
              messages
            });
            const promptMessages = await convertToLanguageModelPrompt({
              prompt: validatedPrompt,
-             modelSupportsImageUrls: model.supportsImageUrls
+             modelSupportsImageUrls: model.supportsImageUrls,
+             modelSupportsUrl: model.supportsUrl
            });
            const inputFormat = validatedPrompt.type;
            const generateResult = await retry(
@@ -2117,7 +2025,7 @@ async function generateObject({
                }),
                tracer,
                fn: async (span2) => {
-                 var _a11, _b, _c, _d, _e, _f, _g, _h;
+                 var _a12, _b2, _c, _d, _e, _f, _g, _h;
                  const result2 = await model.doGenerate({
                    mode: {
                      type: "object-tool",
@@ -2135,7 +2043,7 @@ async function generateObject({
                    abortSignal,
                    headers
                  });
-                 const objectText = (_b = (_a11 = result2.toolCalls) == null ? void 0 : _a11[0]) == null ? void 0 : _b.args;
+                 const objectText = (_b2 = (_a12 = result2.toolCalls) == null ? void 0 : _a12[0]) == null ? void 0 : _b2.args;
                  if (objectText === void 0) {
                    throw new NoObjectGeneratedError();
                  }
@@ -2178,6 +2086,7 @@ async function generateObject({
            rawResponse = generateResult.rawResponse;
            logprobs = generateResult.logprobs;
            resultProviderMetadata = generateResult.providerMetadata;
+           request = (_b = generateResult.request) != null ? _b : {};
            response = generateResult.responseData;
            break;
          }
@@ -2224,6 +2133,7 @@ async function generateObject({
        finishReason,
        usage: calculateLanguageModelUsage(usage),
        warnings,
+       request,
        response: {
          ...response,
          headers: rawResponse == null ? void 0 : rawResponse.headers
@@ -2242,6 +2152,7 @@ var DefaultGenerateObjectResult = class {
    this.warnings = options.warnings;
    this.experimental_providerMetadata = options.providerMetadata;
    this.response = options.response;
+   this.request = options.request;
    this.rawResponse = {
      headers: options.response.headers
    };
@@ -2373,7 +2284,7 @@ function writeToServerResponse({
  }
 
  // core/generate-object/stream-object.ts
- var originalGenerateId2 = createIdGenerator2({ prefix: "aiobj-", size: 24 });
+ var originalGenerateId2 = createIdGenerator2({ prefix: "aiobj", size: 24 });
  async function streamObject({
    model,
    schema: inputSchema,
@@ -2447,7 +2358,7 @@ async function streamObject({
      let transformer;
      switch (mode) {
        case "json": {
-         const validatedPrompt = validatePrompt({
+         const standardPrompt = standardizePrompt({
            system: outputStrategy.jsonSchema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs ? system : injectJsonInstruction({
              prompt: system,
              schema: outputStrategy.jsonSchema
@@ -2463,10 +2374,11 @@ async function streamObject({
            description: schemaDescription
          },
          ...prepareCallSettings(settings),
-         inputFormat: validatedPrompt.type,
+         inputFormat: standardPrompt.type,
          prompt: await convertToLanguageModelPrompt({
-           prompt: validatedPrompt,
-           modelSupportsImageUrls: model.supportsImageUrls
+           prompt: standardPrompt,
+           modelSupportsImageUrls: model.supportsImageUrls,
+           modelSupportsUrl: model.supportsUrl
          }),
          providerMetadata,
          abortSignal,
@@ -2489,7 +2401,7 @@ async function streamObject({
        break;
      }
      case "tool": {
-       const validatedPrompt = validatePrompt({
+       const validatedPrompt = standardizePrompt({
          system,
          prompt,
          messages
@@ -2508,7 +2420,8 @@ async function streamObject({
        inputFormat: validatedPrompt.type,
        prompt: await convertToLanguageModelPrompt({
          prompt: validatedPrompt,
-         modelSupportsImageUrls: model.supportsImageUrls
+         modelSupportsImageUrls: model.supportsImageUrls,
+         modelSupportsUrl: model.supportsUrl
        }),
        providerMetadata,
        abortSignal,
@@ -2541,7 +2454,7 @@ async function streamObject({
      }
    }
    const {
-     result: { stream, warnings, rawResponse },
+     result: { stream, warnings, rawResponse, request },
      doStreamSpan,
      startTimestampMs
    } = await retry(
@@ -2587,6 +2500,7 @@ async function streamObject({
      stream: stream.pipeThrough(new TransformStream(transformer)),
      warnings,
      rawResponse,
+     request: request != null ? request : {},
      onFinish,
      rootSpan,
      doStreamSpan,
@@ -2605,6 +2519,7 @@ var DefaultStreamObjectResult = class {
    stream,
    warnings,
    rawResponse,
+   request,
    outputStrategy,
    onFinish,
    rootSpan,
@@ -2619,6 +2534,7 @@ var DefaultStreamObjectResult = class {
    this.warnings = warnings;
    this.rawResponse = rawResponse;
    this.outputStrategy = outputStrategy;
+   this.request = Promise.resolve(request);
    this.objectPromise = new DelayedPromise();
    const { resolve: resolveUsage, promise: usagePromise } = createResolvablePromise();
    this.usage = usagePromise;
@@ -2894,7 +2810,7 @@ import {
  } from "@ai-sdk/provider";
 
  // errors/invalid-tool-arguments-error.ts
- import { AISDKError as AISDKError7, getErrorMessage as getErrorMessage3 } from "@ai-sdk/provider";
+ import { AISDKError as AISDKError7, getErrorMessage as getErrorMessage2 } from "@ai-sdk/provider";
  var name7 = "AI_InvalidToolArgumentsError";
  var marker7 = `vercel.ai.error.${name7}`;
  var symbol7 = Symbol.for(marker7);
@@ -2904,7 +2820,7 @@ var InvalidToolArgumentsError = class extends AISDKError7 {
    toolArgs,
    toolName,
    cause,
-   message = `Invalid arguments for tool ${toolName}: ${getErrorMessage3(
+   message = `Invalid arguments for tool ${toolName}: ${getErrorMessage2(
      cause
    )}`
  }) {
@@ -3024,12 +2940,30 @@ function prepareToolsAndToolChoice({
      ([name11]) => activeTools.includes(name11)
    ) : Object.entries(tools);
    return {
-     tools: filteredTools.map(([name11, tool2]) => ({
-       type: "function",
-       name: name11,
-       description: tool2.description,
-       parameters: asSchema2(tool2.parameters).jsonSchema
-     })),
+     tools: filteredTools.map(([name11, tool2]) => {
+       const toolType = tool2.type;
+       switch (toolType) {
+         case void 0:
+         case "function":
+           return {
+             type: "function",
+             name: name11,
+             description: tool2.description,
+             parameters: asSchema2(tool2.parameters).jsonSchema
+           };
+         case "provider-defined":
+           return {
+             type: "provider-defined",
+             name: name11,
+             id: tool2.id,
+             args: tool2.args
+           };
+         default: {
+           const exhaustiveCheck = toolType;
+           throw new Error(`Unsupported tool type: ${exhaustiveCheck}`);
+         }
+       }
+     }),
      toolChoice: toolChoice == null ? { type: "auto" } : typeof toolChoice === "string" ? { type: toolChoice } : { type: "tool", toolName: toolChoice.toolName }
    };
  }
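Note: `prepareToolsAndToolChoice` now forwards provider-defined tools instead of assuming every tool is a function tool. A sketch of the shapes it handles, with a made-up id and args (provider-specific values vary):

    const tools = {
      // regular function tool: type is undefined or "function"
      weather: { description: "Get the weather.", parameters: weatherSchema },
      // provider-defined tool: passed through with its id and args
      computer: {
        type: "provider-defined",
        id: "example.computer-use", // hypothetical id
        args: { displayWidthPx: 1024, displayHeightPx: 768 }
      }
    };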
@@ -3085,6 +3019,7 @@ function parseToolCall({
  // core/generate-text/to-response-messages.ts
  function toResponseMessages({
    text = "",
+   tools,
    toolCalls,
    toolResults
  }) {
@@ -3096,19 +3031,28 @@ function toResponseMessages({
    if (toolResults.length > 0) {
      responseMessages.push({
        role: "tool",
-       content: toolResults.map((result) => ({
-         type: "tool-result",
-         toolCallId: result.toolCallId,
-         toolName: result.toolName,
-         result: result.result
-       }))
+       content: toolResults.map((toolResult) => {
+         const tool2 = tools[toolResult.toolName];
+         return (tool2 == null ? void 0 : tool2.experimental_toToolResultContent) != null ? {
+           type: "tool-result",
+           toolCallId: toolResult.toolCallId,
+           toolName: toolResult.toolName,
+           result: tool2.experimental_toToolResultContent(toolResult.result),
+           content: tool2.experimental_toToolResultContent(toolResult.result)
+         } : {
+           type: "tool-result",
+           toolCallId: toolResult.toolCallId,
+           toolName: toolResult.toolName,
+           result: toolResult.result
+         };
+       })
      });
    }
    return responseMessages;
  }
 
  // core/generate-text/generate-text.ts
- var originalGenerateId3 = createIdGenerator3({ prefix: "aitxt-", size: 24 });
+ var originalGenerateId3 = createIdGenerator3({ prefix: "aitxt", size: 24 });
  async function generateText({
    model,
    tools,
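Note: `toResponseMessages` now receives the `tools` record, so a tool can post-process its own result into rich tool-result content via `experimental_toToolResultContent` (matching the new `toolResultContentSchema` above). A hedged sketch of a tool using it; the screenshot tool, `takeScreenshot` helper, and `base64` field are illustrative:

    import { tool } from "ai";
    import { z } from "zod";

    const screenshot = tool({
      parameters: z.object({}),
      // takeScreenshot() is a hypothetical helper returning { base64: string }
      execute: async () => takeScreenshot(),
      experimental_toToolResultContent(result) {
        return [{ type: "image", data: result.base64, mimeType: "image/png" }];
      }
    });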
@@ -3147,6 +3091,7 @@ async function generateText({
    headers,
    settings: { ...settings, maxRetries }
  });
+ const initialPrompt = standardizePrompt({ system, prompt, messages });
  const tracer = getTracer(telemetry);
  return recordSpan({
    name: "ai.generateText",
@@ -3167,22 +3112,13 @@ async function generateText({
    }),
    tracer,
    fn: async (span) => {
-     var _a11, _b, _c, _d, _e;
+     var _a11, _b, _c, _d, _e, _f, _g;
      const retry = retryWithExponentialBackoff({ maxRetries });
-     const validatedPrompt = validatePrompt({
-       system,
-       prompt,
-       messages
-     });
      const mode = {
        type: "regular",
        ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
      };
      const callSettings = prepareCallSettings(settings);
-     const promptMessages = await convertToLanguageModelPrompt({
-       prompt: validatedPrompt,
-       modelSupportsImageUrls: model.supportsImageUrls
-     });
      let currentModelResponse;
      let currentToolCalls = [];
      let currentToolResults = [];
@@ -3197,7 +3133,19 @@ async function generateText({
      };
      let stepType = "initial";
      do {
-       const currentInputFormat = stepCount === 0 ? validatedPrompt.type : "messages";
+       if (stepCount === 1) {
+         initialPrompt.type = "messages";
+       }
+       const promptFormat = stepCount === 0 ? initialPrompt.type : "messages";
+       const promptMessages = await convertToLanguageModelPrompt({
+         prompt: {
+           type: promptFormat,
+           system: initialPrompt.system,
+           messages: [...initialPrompt.messages, ...responseMessages]
+         },
+         modelSupportsImageUrls: model.supportsImageUrls,
+         modelSupportsUrl: model.supportsUrl
+       });
        currentModelResponse = await retry(
          () => recordSpan({
            name: "ai.generateText.doGenerate",
@@ -3209,7 +3157,7 @@ async function generateText({
              telemetry
            }),
            ...baseTelemetryAttributes,
-           "ai.prompt.format": { input: () => currentInputFormat },
+           "ai.prompt.format": { input: () => promptFormat },
            "ai.prompt.messages": {
              input: () => JSON.stringify(promptMessages)
            },
@@ -3227,11 +3175,11 @@ async function generateText({
            }),
            tracer,
            fn: async (span2) => {
-             var _a12, _b2, _c2, _d2, _e2, _f;
+             var _a12, _b2, _c2, _d2, _e2, _f2;
              const result = await model.doGenerate({
                mode,
                ...callSettings,
-               inputFormat: currentInputFormat,
+               inputFormat: promptFormat,
                prompt: promptMessages,
                providerMetadata,
                abortSignal,
@@ -3240,7 +3188,7 @@ async function generateText({
              const responseData = {
                id: (_b2 = (_a12 = result.response) == null ? void 0 : _a12.id) != null ? _b2 : generateId3(),
                timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-               modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : model.modelId
+               modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : model.modelId
              };
              span2.setAttributes(
                selectTelemetryAttributes({
@@ -3310,7 +3258,27 @@ async function generateText({
        }
        const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace((_b = currentModelResponse.text) != null ? _b : "") : (_c = currentModelResponse.text) != null ? _c : "";
        text = nextStepType === "continue" || stepType === "continue" ? text + stepText : stepText;
-       const currentStep = {
+       if (stepType === "continue") {
+         const lastMessage = responseMessages[responseMessages.length - 1];
+         if (typeof lastMessage.content === "string") {
+           lastMessage.content = text;
+         } else {
+           lastMessage.content.push({
+             text: stepText,
+             type: "text"
+           });
+         }
+       } else {
+         responseMessages.push(
+           ...toResponseMessages({
+             text,
+             tools: tools != null ? tools : {},
+             toolCalls: currentToolCalls,
+             toolResults: currentToolResults
+           })
+         );
+       }
+       const currentStepResult = {
          stepType,
          text: stepText,
          toolCalls: currentToolCalls,
@@ -3319,55 +3287,18 @@ async function generateText({
          usage: currentUsage,
          warnings: currentModelResponse.warnings,
          logprobs: currentModelResponse.logprobs,
+         request: (_d = currentModelResponse.request) != null ? _d : {},
          response: {
            ...currentModelResponse.response,
-           headers: (_d = currentModelResponse.rawResponse) == null ? void 0 : _d.headers
+           headers: (_e = currentModelResponse.rawResponse) == null ? void 0 : _e.headers,
+           // deep clone msgs to avoid mutating past messages in multi-step:
+           messages: JSON.parse(JSON.stringify(responseMessages))
          },
          experimental_providerMetadata: currentModelResponse.providerMetadata,
          isContinued: nextStepType === "continue"
        };
-       steps.push(currentStep);
-       await (onStepFinish == null ? void 0 : onStepFinish(currentStep));
-       if (stepType === "continue") {
-         const lastResponseMessage = responseMessages.pop();
-         promptMessages.pop();
-         if (typeof lastResponseMessage.content === "string") {
-           lastResponseMessage.content = text;
-         } else {
-           lastResponseMessage.content.push({
-             text: stepText,
-             type: "text"
-           });
-         }
-         responseMessages.push(lastResponseMessage);
-         promptMessages.push(
-           convertToLanguageModelMessage(lastResponseMessage, null)
-         );
-       } else if (nextStepType === "continue") {
-         const newResponseMessages = toResponseMessages({
-           text,
-           toolCalls: currentToolCalls,
-           toolResults: currentToolResults
-         });
-         responseMessages.push(...newResponseMessages);
-         promptMessages.push(
-           ...newResponseMessages.map(
-             (message) => convertToLanguageModelMessage(message, null)
-           )
-         );
-       } else {
-         const newResponseMessages = toResponseMessages({
-           text: currentModelResponse.text,
-           toolCalls: currentToolCalls,
-           toolResults: currentToolResults
-         });
-         responseMessages.push(...newResponseMessages);
-         promptMessages.push(
-           ...newResponseMessages.map(
-             (message) => convertToLanguageModelMessage(message, null)
-           )
-         );
-       }
+       steps.push(currentStepResult);
+       await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
        stepType = nextStepType;
      } while (stepType !== "done");
      span.setAttributes(
@@ -3401,9 +3332,11 @@ async function generateText({
        finishReason: currentModelResponse.finishReason,
        usage,
        warnings: currentModelResponse.warnings,
+       request: (_f = currentModelResponse.request) != null ? _f : {},
        response: {
          ...currentModelResponse.response,
-         headers: (_e = currentModelResponse.rawResponse) == null ? void 0 : _e.headers
+         headers: (_g = currentModelResponse.rawResponse) == null ? void 0 : _g.headers,
+         messages: responseMessages
        },
        logprobs: currentModelResponse.logprobs,
        responseMessages,
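Note: `generateText` now assembles `responseMessages` incrementally inside the step loop (deep-cloning them into each step's `response.messages`) and exposes the final array on the result's `response.messages`, which makes multi-turn continuation straightforward. A small usage sketch (`history` is an assumed `CoreMessage[]`):

    const result = await generateText({ model, messages: history });
    // append this call's assistant/tool messages to the running history:
    history.push(...result.response.messages);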
@@ -3481,6 +3414,7 @@ var DefaultGenerateTextResult = class {
    this.finishReason = options.finishReason;
    this.usage = options.usage;
    this.warnings = options.warnings;
+   this.request = options.request;
    this.response = options.response;
    this.responseMessages = options.responseMessages;
    this.roundtrips = options.steps;
@@ -3838,7 +3772,7 @@ function runToolsTransformation({
  }
 
  // core/generate-text/stream-text.ts
- var originalGenerateId4 = createIdGenerator4({ prefix: "aitxt-", size: 24 });
+ var originalGenerateId4 = createIdGenerator4({ prefix: "aitxt", size: 24 });
  async function streamText({
    model,
    tools,
@@ -3880,6 +3814,7 @@ async function streamText({
    settings: { ...settings, maxRetries }
  });
  const tracer = getTracer(telemetry);
+ const initialPrompt = standardizePrompt({ system, prompt, messages });
  return recordSpan({
    name: "ai.streamText",
    attributes: selectTelemetryAttributes({
@@ -3899,11 +3834,20 @@ async function streamText({
    fn: async (rootSpan) => {
      const retry = retryWithExponentialBackoff({ maxRetries });
      const startStep = async ({
-       promptMessages: promptMessages2,
-       promptType
+       responseMessages
      }) => {
+       const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
+       const promptMessages = await convertToLanguageModelPrompt({
+         prompt: {
+           type: promptFormat,
+           system: initialPrompt.system,
+           messages: [...initialPrompt.messages, ...responseMessages]
+         },
+         modelSupportsImageUrls: model.supportsImageUrls,
+         modelSupportsUrl: model.supportsUrl
+       });
        const {
-         result: { stream: stream2, warnings: warnings2, rawResponse: rawResponse2 },
+         result: { stream: stream2, warnings: warnings2, rawResponse: rawResponse2, request: request2 },
          doStreamSpan: doStreamSpan2,
          startTimestampMs: startTimestampMs2
        } = await retry(
@@ -3918,10 +3862,10 @@ async function streamText({
          }),
          ...baseTelemetryAttributes,
          "ai.prompt.format": {
-           input: () => promptType
+           input: () => promptFormat
          },
          "ai.prompt.messages": {
-           input: () => JSON.stringify(promptMessages2)
+           input: () => JSON.stringify(promptMessages)
          },
          // standardized gen-ai llm span attributes:
          "gen_ai.system": model.provider,
@@ -3951,8 +3895,8 @@ async function streamText({
            })
          },
          ...prepareCallSettings(settings),
-         inputFormat: promptType,
-         prompt: promptMessages2,
+         inputFormat: promptFormat,
+         prompt: promptMessages,
          providerMetadata,
          abortSignal,
          headers
@@ -3971,28 +3915,23 @@ async function streamText({
            abortSignal
          }),
          warnings: warnings2,
+         request: request2 != null ? request2 : {},
          rawResponse: rawResponse2
        },
        doStreamSpan: doStreamSpan2,
        startTimestampMs: startTimestampMs2
      };
    };
-   const promptMessages = await convertToLanguageModelPrompt({
-     prompt: validatePrompt({ system, prompt, messages }),
-     modelSupportsImageUrls: model.supportsImageUrls
-   });
    const {
-     result: { stream, warnings, rawResponse },
+     result: { stream, warnings, rawResponse, request },
      doStreamSpan,
      startTimestampMs
-   } = await startStep({
-     promptType: validatePrompt({ system, prompt, messages }).type,
-     promptMessages
-   });
+   } = await startStep({ responseMessages: [] });
    return new DefaultStreamTextResult({
      stream,
      warnings,
      rawResponse,
+     request,
      onChunk,
      onFinish,
      onStepFinish,
@@ -4003,11 +3942,11 @@ async function streamText({
      maxSteps,
      continueSteps,
      startStep,
-     promptMessages,
      modelId: model.modelId,
      now: now2,
      currentDate,
-     generateId: generateId3
+     generateId: generateId3,
+     tools
    });
  }
  });
@@ -4017,6 +3956,7 @@ var DefaultStreamTextResult = class {
    stream,
    warnings,
    rawResponse,
+   request,
    onChunk,
    onFinish,
    onStepFinish,
@@ -4027,11 +3967,11 @@ var DefaultStreamTextResult = class {
    maxSteps,
    continueSteps,
    startStep,
-   promptMessages,
    modelId,
    now: now2,
    currentDate,
-   generateId: generateId3
+   generateId: generateId3,
+   tools
  }) {
    this.warnings = warnings;
    this.rawResponse = rawResponse;
@@ -4052,6 +3992,8 @@ var DefaultStreamTextResult = class {
      promise: providerMetadataPromise
    } = createResolvablePromise();
    this.experimental_providerMetadata = providerMetadataPromise;
+   const { resolve: resolveRequest, promise: requestPromise } = createResolvablePromise();
+   this.request = requestPromise;
    const { resolve: resolveResponse, promise: responsePromise } = createResolvablePromise();
    this.response = responsePromise;
    const {
@@ -4072,14 +4014,15 @@ var DefaultStreamTextResult = class {
      startTimestamp,
      doStreamSpan: doStreamSpan2,
      currentStep,
-     promptMessages: promptMessages2,
+     responseMessages,
      usage = {
        promptTokens: 0,
        completionTokens: 0,
        totalTokens: 0
      },
      stepType,
-     previousStepText = ""
+     previousStepText = "",
+     stepRequest
    }) {
      const stepToolCalls = [];
      const stepToolResults = [];
@@ -4206,6 +4149,7 @@ var DefaultStreamTextResult = class {
      },
      // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
      async flush(controller) {
+       var _a11;
        const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
        let nextStepType = "done";
        if (currentStep + 1 < maxSteps) {
@@ -4271,10 +4215,32 @@ var DefaultStreamTextResult = class {
          usage: stepUsage,
          experimental_providerMetadata: stepProviderMetadata,
          logprobs: stepLogProbs,
-         response: stepResponse,
+         response: {
+           ...stepResponse
+         },
          isContinued: nextStepType === "continue"
        });
-       const stepResult = {
+       if (stepType === "continue") {
+         const lastMessage = responseMessages[responseMessages.length - 1];
+         if (typeof lastMessage.content === "string") {
+           lastMessage.content = stepText;
+         } else {
+           lastMessage.content.push({
+             text: stepText,
+             type: "text"
+           });
+         }
+       } else {
+         responseMessages.push(
+           ...toResponseMessages({
+             text: stepText,
+             tools: tools != null ? tools : {},
+             toolCalls: stepToolCalls,
+             toolResults: stepToolResults
+           })
+         );
+       }
+       const currentStepResult = {
          stepType,
          text: stepText,
          toolCalls: stepToolCalls,
@@ -4283,44 +4249,30 @@ var DefaultStreamTextResult = class {
          usage: stepUsage,
          warnings: self.warnings,
          logprobs: stepLogProbs,
-         response: stepResponse,
+         request: stepRequest,
          rawResponse: self.rawResponse,
+         response: {
+           ...stepResponse,
+           headers: (_a11 = self.rawResponse) == null ? void 0 : _a11.headers,
+           // deep clone msgs to avoid mutating past messages in multi-step:
+           messages: JSON.parse(JSON.stringify(responseMessages))
+         },
          experimental_providerMetadata: stepProviderMetadata,
          isContinued: nextStepType === "continue"
        };
-       stepResults.push(stepResult);
-       await (onStepFinish == null ? void 0 : onStepFinish(stepResult));
+       stepResults.push(currentStepResult);
+       await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
        const combinedUsage = {
          promptTokens: usage.promptTokens + stepUsage.promptTokens,
          completionTokens: usage.completionTokens + stepUsage.completionTokens,
          totalTokens: usage.totalTokens + stepUsage.totalTokens
        };
        if (nextStepType !== "done") {
-         if (stepType === "continue") {
-           const lastPromptMessage = promptMessages2[promptMessages2.length - 1];
-           lastPromptMessage.content.push({
-             text: stepText,
-             type: "text"
-           });
-         } else {
-           promptMessages2.push(
-             ...toResponseMessages({
-               text: stepText,
-               toolCalls: stepToolCalls,
-               toolResults: stepToolResults
-             }).map(
-               (message) => convertToLanguageModelMessage(message, null)
-             )
-           );
-         }
          const {
            result,
            doStreamSpan: doStreamSpan3,
            startTimestampMs: startTimestamp2
-         } = await startStep({
-           promptType: "messages",
-           promptMessages: promptMessages2
-         });
+         } = await startStep({ responseMessages });
          self.warnings = result.warnings;
          self.rawResponse = result.rawResponse;
          addStepStream({
@@ -4328,10 +4280,11 @@ var DefaultStreamTextResult = class {
            startTimestamp: startTimestamp2,
            doStreamSpan: doStreamSpan3,
            currentStep: currentStep + 1,
-           promptMessages: promptMessages2,
+           responseMessages,
            usage: combinedUsage,
            stepType: nextStepType,
-           previousStepText: fullStepText
+           previousStepText: fullStepText,
+           stepRequest: result.request
          });
          return;
        }
@@ -4342,7 +4295,9 @@ var DefaultStreamTextResult = class {
          usage: combinedUsage,
          experimental_providerMetadata: stepProviderMetadata,
          logprobs: stepLogProbs,
-         response: stepResponse
+         response: {
+           ...stepResponse
+         }
        });
        closeStitchableStream();
        rootSpan.setAttributes(
@@ -4365,34 +4320,13 @@ var DefaultStreamTextResult = class {
            }
          })
        );
-       const responseMessages = stepResults.reduce((responseMessages2, step) => {
-         if (step.stepType === "continue") {
-           const lastResponseMessage = responseMessages2.pop();
-           if (typeof lastResponseMessage.content === "string") {
-             lastResponseMessage.content += step.text;
-           } else {
-             lastResponseMessage.content.push({
-               text: step.text,
-               type: "text"
-             });
-           }
-           return [...responseMessages2, lastResponseMessage];
-         }
-         return [
-           ...responseMessages2,
-           ...toResponseMessages({
-             text: step.text,
-             toolCalls: step.toolCalls,
-             toolResults: step.toolResults
-           })
-         ];
-       }, []);
        resolveUsage(combinedUsage);
        resolveFinishReason(stepFinishReason);
        resolveText(fullStepText);
        resolveToolCalls(stepToolCalls);
        resolveProviderMetadata(stepProviderMetadata);
        resolveToolResults(stepToolResults);
+       resolveRequest(stepRequest);
        resolveResponse({
          ...stepResponse,
          headers: rawResponse == null ? void 0 : rawResponse.headers
@@ -4410,10 +4344,12 @@ var DefaultStreamTextResult = class {
        // optional as well. Therefore we need to cast the toolResults to any.
        // The type exposed to the users will be correctly inferred.
        toolResults: stepToolResults,
+       request: stepRequest,
        rawResponse,
        response: {
          ...stepResponse,
-         headers: rawResponse == null ? void 0 : rawResponse.headers
+         headers: rawResponse == null ? void 0 : rawResponse.headers,
+         messages: responseMessages
        },
        warnings,
        experimental_providerMetadata: stepProviderMetadata,
@@ -4435,9 +4371,10 @@ var DefaultStreamTextResult = class {
      startTimestamp: startTimestampMs,
      doStreamSpan,
      currentStep: 0,
-     promptMessages,
+     responseMessages: [],
      usage: void 0,
-     stepType: "initial"
+     stepType: "initial",
+     stepRequest: request
    });
  }
  /**
@@ -4476,7 +4413,7 @@ var DefaultStreamTextResult = class {
  }
  toDataStreamInternal({
    callbacks = {},
-   getErrorMessage: getErrorMessage4 = () => "",
+   getErrorMessage: getErrorMessage3 = () => "",
    // mask error messages for safety by default
    sendUsage = true
  } = {}) {
@@ -4551,7 +4488,7 @@ var DefaultStreamTextResult = class {
        }
        case "error": {
          controller.enqueue(
-           formatStreamPart("error", getErrorMessage4(chunk.error))
+           formatStreamPart("error", getErrorMessage3(chunk.error))
          );
          break;
        }
@@ -4599,7 +4536,7 @@ var DefaultStreamTextResult = class {
      statusText: "statusText" in options ? options.statusText : void 0
    };
    const data = options == null ? void 0 : "data" in options ? options.data : void 0;
-   const getErrorMessage4 = options == null ? void 0 : "getErrorMessage" in options ? options.getErrorMessage : void 0;
+   const getErrorMessage3 = options == null ? void 0 : "getErrorMessage" in options ? options.getErrorMessage : void 0;
    const sendUsage = options == null ? void 0 : "sendUsage" in options ? options.sendUsage : void 0;
    writeToServerResponse({
      response,
@@ -4609,7 +4546,7 @@ var DefaultStreamTextResult = class {
      contentType: "text/plain; charset=utf-8",
      dataStreamVersion: "v1"
    }),
-   stream: this.toDataStream({ data, getErrorMessage: getErrorMessage4, sendUsage })
+   stream: this.toDataStream({ data, getErrorMessage: getErrorMessage3, sendUsage })
  });
  }
  pipeTextStreamToResponse(response, init) {
@@ -4641,10 +4578,10 @@ var DefaultStreamTextResult = class {
    statusText: "statusText" in options ? options.statusText : void 0
  };
  const data = options == null ? void 0 : "data" in options ? options.data : void 0;
- const getErrorMessage4 = options == null ? void 0 : "getErrorMessage" in options ? options.getErrorMessage : void 0;
+ const getErrorMessage3 = options == null ? void 0 : "getErrorMessage" in options ? options.getErrorMessage : void 0;
  const sendUsage = options == null ? void 0 : "sendUsage" in options ? options.sendUsage : void 0;
  return new Response(
-   this.toDataStream({ data, getErrorMessage: getErrorMessage4, sendUsage }),
+   this.toDataStream({ data, getErrorMessage: getErrorMessage3, sendUsage }),
    {
      status: (_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200,
      statusText: init == null ? void 0 : init.statusText,
@@ -4686,6 +4623,7 @@ var experimental_wrapLanguageModel = ({
  modelId: modelId != null ? modelId : model.modelId,
  defaultObjectGenerationMode: model.defaultObjectGenerationMode,
  supportsImageUrls: model.supportsImageUrls,
+ supportsUrl: model.supportsUrl,
  supportsStructuredOutputs: model.supportsStructuredOutputs,
  async doGenerate(params) {
    const transformedParams = await doTransform({ params, type: "generate" });
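Note: `experimental_wrapLanguageModel` now forwards `supportsUrl` from the wrapped model, so URL passthrough keeps working behind middleware. A minimal sketch (the middleware object is hypothetical):

    const wrapped = experimental_wrapLanguageModel({
      model,
      middleware: myMiddleware
    });
    // wrapped exposes the original model's supportsUrl:
    // wrapped.supportsUrl === model.supportsUrl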