ai 3.4.18 → 3.4.21

This diff compares the publicly released contents of the two package versions as published to a supported registry. It is provided for informational purposes only.
package/dist/index.mjs CHANGED
@@ -593,12 +593,6 @@ var DefaultEmbedManyResult = class {
  // core/generate-object/generate-object.ts
  import { createIdGenerator, safeParseJSON } from "@ai-sdk/provider-utils";
 
- // core/prompt/convert-to-language-model-prompt.ts
- import {
-   convertUint8ArrayToBase64 as convertUint8ArrayToBase642,
-   getErrorMessage as getErrorMessage2
- } from "@ai-sdk/provider-utils";
-
  // util/download-error.ts
  import { AISDKError as AISDKError2 } from "@ai-sdk/provider";
  var name2 = "AI_DownloadError";
@@ -844,19 +838,21 @@ function splitDataUrl(dataUrl) {
  async function convertToLanguageModelPrompt({
    prompt,
    modelSupportsImageUrls = true,
+   modelSupportsUrl = () => false,
    downloadImplementation = download
  }) {
-   const downloadedAssets = modelSupportsImageUrls || prompt.messages == null ? null : await downloadAssets(prompt.messages, downloadImplementation);
-   const languageModelMessages = [];
-   if (prompt.system != null) {
-     languageModelMessages.push({ role: "system", content: prompt.system });
-   }
-   languageModelMessages.push(
+   const downloadedAssets = await downloadAssets(
+     prompt.messages,
+     downloadImplementation,
+     modelSupportsImageUrls,
+     modelSupportsUrl
+   );
+   return [
+     ...prompt.system != null ? [{ role: "system", content: prompt.system }] : [],
      ...prompt.messages.map(
        (message) => convertToLanguageModelMessage(message, downloadedAssets)
      )
-   );
-   return languageModelMessages;
+   ];
  }
  function convertToLanguageModelMessage(message, downloadedAssets) {
    const role = message.role;
@@ -878,178 +874,7 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
    }
    return {
      role: "user",
-     content: message.content.map(
-       (part) => {
-         var _a11, _b, _c, _d, _e;
-         switch (part.type) {
-           case "text": {
-             return {
-               type: "text",
-               text: part.text,
-               providerMetadata: part.experimental_providerMetadata
-             };
-           }
-           case "image": {
-             if (part.image instanceof URL) {
-               if (downloadedAssets == null) {
-                 return {
-                   type: "image",
-                   image: part.image,
-                   mimeType: part.mimeType,
-                   providerMetadata: part.experimental_providerMetadata
-                 };
-               } else {
-                 const downloadedImage = downloadedAssets[part.image.toString()];
-                 return {
-                   type: "image",
-                   image: downloadedImage.data,
-                   mimeType: (_a11 = part.mimeType) != null ? _a11 : downloadedImage.mimeType,
-                   providerMetadata: part.experimental_providerMetadata
-                 };
-               }
-             }
-             if (typeof part.image === "string") {
-               try {
-                 const url = new URL(part.image);
-                 switch (url.protocol) {
-                   case "http:":
-                   case "https:": {
-                     if (downloadedAssets == null) {
-                       return {
-                         type: "image",
-                         image: url,
-                         mimeType: part.mimeType,
-                         providerMetadata: part.experimental_providerMetadata
-                       };
-                     } else {
-                       const downloadedImage = downloadedAssets[url.toString()];
-                       return {
-                         type: "image",
-                         image: downloadedImage.data,
-                         mimeType: (_b = part.mimeType) != null ? _b : downloadedImage.mimeType,
-                         providerMetadata: part.experimental_providerMetadata
-                       };
-                     }
-                   }
-                   case "data:": {
-                     try {
-                       const { mimeType, base64Content } = splitDataUrl(
-                         part.image
-                       );
-                       if (mimeType == null || base64Content == null) {
-                         throw new Error("Invalid data URL format");
-                       }
-                       return {
-                         type: "image",
-                         image: convertDataContentToUint8Array(base64Content),
-                         mimeType,
-                         providerMetadata: part.experimental_providerMetadata
-                       };
-                     } catch (error) {
-                       throw new Error(
-                         `Error processing data URL: ${getErrorMessage2(
-                           message
-                         )}`
-                       );
-                     }
-                   }
-                 }
-               } catch (_ignored) {
-               }
-             }
-             const imageUint8 = convertDataContentToUint8Array(part.image);
-             return {
-               type: "image",
-               image: imageUint8,
-               mimeType: (_c = part.mimeType) != null ? _c : detectImageMimeType(imageUint8),
-               providerMetadata: part.experimental_providerMetadata
-             };
-           }
-           case "file": {
-             if (part.data instanceof URL) {
-               if (downloadedAssets == null) {
-                 return {
-                   type: "file",
-                   data: part.data,
-                   mimeType: part.mimeType,
-                   providerMetadata: part.experimental_providerMetadata
-                 };
-               } else {
-                 const downloadedImage = downloadedAssets[part.data.toString()];
-                 return {
-                   type: "file",
-                   data: convertUint8ArrayToBase642(downloadedImage.data),
-                   mimeType: (_d = part.mimeType) != null ? _d : downloadedImage.mimeType,
-                   providerMetadata: part.experimental_providerMetadata
-                 };
-               }
-             }
-             if (typeof part.data === "string") {
-               try {
-                 const url = new URL(part.data);
-                 switch (url.protocol) {
-                   case "http:":
-                   case "https:": {
-                     if (downloadedAssets == null) {
-                       return {
-                         type: "file",
-                         data: url,
-                         mimeType: part.mimeType,
-                         providerMetadata: part.experimental_providerMetadata
-                       };
-                     } else {
-                       const downloadedImage = downloadedAssets[url.toString()];
-                       return {
-                         type: "file",
-                         data: convertUint8ArrayToBase642(
-                           downloadedImage.data
-                         ),
-                         mimeType: (_e = part.mimeType) != null ? _e : downloadedImage.mimeType,
-                         providerMetadata: part.experimental_providerMetadata
-                       };
-                     }
-                   }
-                   case "data:": {
-                     try {
-                       const { mimeType, base64Content } = splitDataUrl(
-                         part.data
-                       );
-                       if (mimeType == null || base64Content == null) {
-                         throw new Error("Invalid data URL format");
-                       }
-                       return {
-                         type: "file",
-                         data: convertDataContentToBase64String(
-                           base64Content
-                         ),
-                         mimeType,
-                         providerMetadata: part.experimental_providerMetadata
-                       };
-                     } catch (error) {
-                       throw new Error(
-                         `Error processing data URL: ${getErrorMessage2(
-                           message
-                         )}`
-                       );
-                     }
-                   }
-                 }
-               } catch (_ignored) {
-               }
-             }
-             const imageBase64 = convertDataContentToBase64String(
-               part.data
-             );
-             return {
-               type: "file",
-               data: imageBase64,
-               mimeType: part.mimeType,
-               providerMetadata: part.experimental_providerMetadata
-             };
-           }
-         }
-       }
-     ).filter((part) => part.type !== "text" || part.text !== ""),
+     content: message.content.map((part) => convertPartToLanguageModelPart(part, downloadedAssets)).filter((part) => part.type !== "text" || part.text !== ""),
      providerMetadata: message.experimental_providerMetadata
    };
  }
@@ -1084,6 +909,8 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
        toolCallId: part.toolCallId,
        toolName: part.toolName,
        result: part.result,
+       content: part.experimental_content,
+       isError: part.isError,
        providerMetadata: part.experimental_providerMetadata
      })),
      providerMetadata: message.experimental_providerMetadata
@@ -1095,17 +922,19 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
      }
    }
  }
- async function downloadAssets(messages, downloadImplementation) {
+ async function downloadAssets(messages, downloadImplementation, modelSupportsImageUrls, modelSupportsUrl) {
    const urls = messages.filter((message) => message.role === "user").map((message) => message.content).filter(
      (content) => Array.isArray(content)
    ).flat().filter(
      (part) => part.type === "image" || part.type === "file"
+   ).filter(
+     (part) => !(part.type === "image" && modelSupportsImageUrls === true)
    ).map((part) => part.type === "image" ? part.image : part.data).map(
      (part) => (
        // support string urls:
        typeof part === "string" && (part.startsWith("http:") || part.startsWith("https:")) ? new URL(part) : part
      )
-   ).filter((image) => image instanceof URL);
+   ).filter((image) => image instanceof URL).filter((url) => !modelSupportsUrl(url));
    const downloadedImages = await Promise.all(
      urls.map(async (url) => ({
        url,
@@ -1116,6 +945,79 @@ async function downloadAssets(messages, downloadImplementation) {
      downloadedImages.map(({ url, data }) => [url.toString(), data])
    );
  }
+ function convertPartToLanguageModelPart(part, downloadedAssets) {
+   if (part.type === "text") {
+     return {
+       type: "text",
+       text: part.text,
+       providerMetadata: part.experimental_providerMetadata
+     };
+   }
+   let mimeType = part.mimeType;
+   let data;
+   let content;
+   let normalizedData;
+   const type = part.type;
+   switch (type) {
+     case "image":
+       data = part.image;
+       break;
+     case "file":
+       data = part.data;
+       break;
+     default:
+       throw new Error(`Unsupported part type: ${type}`);
+   }
+   try {
+     content = typeof data === "string" ? new URL(data) : data;
+   } catch (error) {
+     content = data;
+   }
+   if (content instanceof URL) {
+     if (content.protocol === "data:") {
+       const { mimeType: dataUrlMimeType, base64Content } = splitDataUrl(
+         content.toString()
+       );
+       if (dataUrlMimeType == null || base64Content == null) {
+         throw new Error(`Invalid data URL format in part ${type}`);
+       }
+       mimeType = dataUrlMimeType;
+       normalizedData = convertDataContentToUint8Array(base64Content);
+     } else {
+       const downloadedFile = downloadedAssets[content.toString()];
+       if (downloadedFile) {
+         normalizedData = downloadedFile.data;
+         mimeType != null ? mimeType : mimeType = downloadedFile.mimeType;
+       } else {
+         normalizedData = content;
+       }
+     }
+   } else {
+     normalizedData = convertDataContentToUint8Array(content);
+   }
+   switch (type) {
+     case "image":
+       if (mimeType == null && normalizedData instanceof Uint8Array) {
+         mimeType = detectImageMimeType(normalizedData);
+       }
+       return {
+         type: "image",
+         image: normalizedData,
+         mimeType,
+         providerMetadata: part.experimental_providerMetadata
+       };
+     case "file":
+       if (mimeType == null) {
+         throw new Error(`Mime type is missing for file part`);
+       }
+       return {
+         type: "file",
+         data: normalizedData instanceof Uint8Array ? convertDataContentToBase64String(normalizedData) : normalizedData,
+         mimeType,
+         providerMetadata: part.experimental_providerMetadata
+       };
+   }
+ }
 
  // errors/invalid-argument-error.ts
  import { AISDKError as AISDKError5 } from "@ai-sdk/provider";
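
Note: the hunks above replace the blanket modelSupportsImageUrls check with a per-URL modelSupportsUrl callback; downloadAssets now receives both and only downloads URLs the model cannot consume directly. A minimal sketch of how a model might opt in (assumption: myModel is an existing LanguageModelV1 implementation; the hostname check is purely illustrative):

    import type { LanguageModelV1 } from "@ai-sdk/provider";

    declare const myModel: LanguageModelV1; // assumption: an existing provider model

    const modelWithUrlSupport: LanguageModelV1 = {
      ...myModel,
      // URLs this returns true for are passed through to the provider
      // instead of being downloaded by convertToLanguageModelPrompt:
      supportsUrl: (url: URL) => url.hostname === "storage.example.com",
    };
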
@@ -1272,10 +1174,10 @@ function prepareCallSettings({
  // core/prompt/standardize-prompt.ts
  import { InvalidPromptError } from "@ai-sdk/provider";
  import { safeValidateTypes } from "@ai-sdk/provider-utils";
- import { z as z6 } from "zod";
+ import { z as z7 } from "zod";
 
  // core/prompt/message.ts
- import { z as z5 } from "zod";
+ import { z as z6 } from "zod";
 
  // core/types/provider-metadata.ts
  import { z as z3 } from "zod";
@@ -1300,67 +1202,83 @@ var providerMetadataSchema = z3.record(
  );
 
  // core/prompt/content-part.ts
+ import { z as z5 } from "zod";
+
+ // core/prompt/tool-result-content.ts
  import { z as z4 } from "zod";
- var textPartSchema = z4.object({
-   type: z4.literal("text"),
-   text: z4.string(),
+ var toolResultContentSchema = z4.array(
+   z4.union([
+     z4.object({ type: z4.literal("text"), text: z4.string() }),
+     z4.object({
+       type: z4.literal("image"),
+       data: z4.string(),
+       mimeType: z4.string().optional()
+     })
+   ])
+ );
+
+ // core/prompt/content-part.ts
+ var textPartSchema = z5.object({
+   type: z5.literal("text"),
+   text: z5.string(),
    experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var imagePartSchema = z4.object({
-   type: z4.literal("image"),
-   image: z4.union([dataContentSchema, z4.instanceof(URL)]),
-   mimeType: z4.string().optional(),
+ var imagePartSchema = z5.object({
+   type: z5.literal("image"),
+   image: z5.union([dataContentSchema, z5.instanceof(URL)]),
+   mimeType: z5.string().optional(),
    experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var filePartSchema = z4.object({
-   type: z4.literal("file"),
-   data: z4.union([dataContentSchema, z4.instanceof(URL)]),
-   mimeType: z4.string(),
+ var filePartSchema = z5.object({
+   type: z5.literal("file"),
+   data: z5.union([dataContentSchema, z5.instanceof(URL)]),
+   mimeType: z5.string(),
    experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var toolCallPartSchema = z4.object({
-   type: z4.literal("tool-call"),
-   toolCallId: z4.string(),
-   toolName: z4.string(),
-   args: z4.unknown()
+ var toolCallPartSchema = z5.object({
+   type: z5.literal("tool-call"),
+   toolCallId: z5.string(),
+   toolName: z5.string(),
+   args: z5.unknown()
  });
- var toolResultPartSchema = z4.object({
-   type: z4.literal("tool-result"),
-   toolCallId: z4.string(),
-   toolName: z4.string(),
-   result: z4.unknown(),
-   isError: z4.boolean().optional(),
+ var toolResultPartSchema = z5.object({
+   type: z5.literal("tool-result"),
+   toolCallId: z5.string(),
+   toolName: z5.string(),
+   result: z5.unknown(),
+   content: toolResultContentSchema.optional(),
+   isError: z5.boolean().optional(),
    experimental_providerMetadata: providerMetadataSchema.optional()
  });
 
  // core/prompt/message.ts
- var coreSystemMessageSchema = z5.object({
-   role: z5.literal("system"),
-   content: z5.string(),
+ var coreSystemMessageSchema = z6.object({
+   role: z6.literal("system"),
+   content: z6.string(),
    experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var coreUserMessageSchema = z5.object({
-   role: z5.literal("user"),
-   content: z5.union([
-     z5.string(),
-     z5.array(z5.union([textPartSchema, imagePartSchema, filePartSchema]))
+ var coreUserMessageSchema = z6.object({
+   role: z6.literal("user"),
+   content: z6.union([
+     z6.string(),
+     z6.array(z6.union([textPartSchema, imagePartSchema, filePartSchema]))
    ]),
    experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var coreAssistantMessageSchema = z5.object({
-   role: z5.literal("assistant"),
-   content: z5.union([
-     z5.string(),
-     z5.array(z5.union([textPartSchema, toolCallPartSchema]))
+ var coreAssistantMessageSchema = z6.object({
+   role: z6.literal("assistant"),
+   content: z6.union([
+     z6.string(),
+     z6.array(z6.union([textPartSchema, toolCallPartSchema]))
    ]),
    experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var coreToolMessageSchema = z5.object({
-   role: z5.literal("tool"),
-   content: z5.array(toolResultPartSchema),
+ var coreToolMessageSchema = z6.object({
+   role: z6.literal("tool"),
+   content: z6.array(toolResultPartSchema),
    experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var coreMessageSchema = z5.union([
+ var coreMessageSchema = z6.union([
    coreSystemMessageSchema,
    coreUserMessageSchema,
    coreAssistantMessageSchema,
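
Note: tool-result parts now accept an optional rich content array next to result (validated by the new toolResultContentSchema: text parts plus base64 image parts), and the isError flag is now part of the schema. A sketch of a value the new schema accepts (base64Png is a placeholder assumption):

    declare const base64Png: string; // assumption: base64-encoded PNG data

    // A value that satisfies the new toolResultContentSchema:
    const richContent = [
      { type: "text" as const, text: "screenshot captured" },
      { type: "image" as const, data: base64Png, mimeType: "image/png" },
    ];
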
@@ -1408,7 +1326,7 @@ function standardizePrompt(prompt) {
  if (prompt.messages != null) {
    const validationResult = safeValidateTypes({
      value: prompt.messages,
-     schema: z6.array(coreMessageSchema)
+     schema: z7.array(coreMessageSchema)
    });
    if (!validationResult.success) {
      throw new InvalidPromptError({
@@ -1873,7 +1791,7 @@ function validateObjectGenerationInput({
  }
 
  // core/generate-object/generate-object.ts
- var originalGenerateId = createIdGenerator({ prefix: "aiobj-", size: 24 });
+ var originalGenerateId = createIdGenerator({ prefix: "aiobj", size: 24 });
  async function generateObject({
    model,
    enum: enumValues,
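
Note: the id prefixes lose their trailing dash here (and in the matching hunks below for streamObject, generateText, and streamText), presumably because createIdGenerator now inserts its own separator; generated ids keep the familiar aiobj-/aitxt- shape. Illustrative sketch:

    import { createIdGenerator } from "@ai-sdk/provider-utils";

    const generateObjectId = createIdGenerator({ prefix: "aiobj", size: 24 });
    generateObjectId(); // e.g. "aiobj-" followed by 24 random characters (illustrative)
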
@@ -1943,6 +1861,7 @@ async function generateObject({
    }),
    tracer,
    fn: async (span) => {
+     var _a11, _b;
      const retry = retryWithExponentialBackoff({ maxRetries });
      if (mode === "auto" || mode == null) {
        mode = model.defaultObjectGenerationMode;
@@ -1953,6 +1872,7 @@ async function generateObject({
      let warnings;
      let rawResponse;
      let response;
+     let request;
      let logprobs;
      let resultProviderMetadata;
      switch (mode) {
@@ -1967,7 +1887,8 @@ async function generateObject({
          });
          const promptMessages = await convertToLanguageModelPrompt({
            prompt: standardPrompt,
-           modelSupportsImageUrls: model.supportsImageUrls
+           modelSupportsImageUrls: model.supportsImageUrls,
+           modelSupportsUrl: model.supportsUrl
          });
          const generateResult = await retry(
            () => recordSpan({
@@ -2000,7 +1921,7 @@ async function generateObject({
              }),
              tracer,
              fn: async (span2) => {
-               var _a11, _b, _c, _d, _e, _f;
+               var _a12, _b2, _c, _d, _e, _f;
                const result2 = await model.doGenerate({
                  mode: {
                    type: "object-json",
@@ -2019,7 +1940,7 @@ async function generateObject({
                  throw new NoObjectGeneratedError();
                }
                const responseData = {
-                 id: (_b = (_a11 = result2.response) == null ? void 0 : _a11.id) != null ? _b : generateId3(),
+                 id: (_b2 = (_a12 = result2.response) == null ? void 0 : _a12.id) != null ? _b2 : generateId3(),
                  timestamp: (_d = (_c = result2.response) == null ? void 0 : _c.timestamp) != null ? _d : currentDate(),
                  modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId
                };
@@ -2057,6 +1978,7 @@ async function generateObject({
          rawResponse = generateResult.rawResponse;
          logprobs = generateResult.logprobs;
          resultProviderMetadata = generateResult.providerMetadata;
+         request = (_a11 = generateResult.request) != null ? _a11 : {};
          response = generateResult.responseData;
          break;
        }
@@ -2068,7 +1990,8 @@ async function generateObject({
          });
          const promptMessages = await convertToLanguageModelPrompt({
            prompt: validatedPrompt,
-           modelSupportsImageUrls: model.supportsImageUrls
+           modelSupportsImageUrls: model.supportsImageUrls,
+           modelSupportsUrl: model.supportsUrl
          });
          const inputFormat = validatedPrompt.type;
          const generateResult = await retry(
@@ -2102,7 +2025,7 @@ async function generateObject({
              }),
              tracer,
              fn: async (span2) => {
-               var _a11, _b, _c, _d, _e, _f, _g, _h;
+               var _a12, _b2, _c, _d, _e, _f, _g, _h;
                const result2 = await model.doGenerate({
                  mode: {
                    type: "object-tool",
@@ -2120,7 +2043,7 @@ async function generateObject({
                  abortSignal,
                  headers
                });
-               const objectText = (_b = (_a11 = result2.toolCalls) == null ? void 0 : _a11[0]) == null ? void 0 : _b.args;
+               const objectText = (_b2 = (_a12 = result2.toolCalls) == null ? void 0 : _a12[0]) == null ? void 0 : _b2.args;
                if (objectText === void 0) {
                  throw new NoObjectGeneratedError();
                }
@@ -2163,6 +2086,7 @@ async function generateObject({
          rawResponse = generateResult.rawResponse;
          logprobs = generateResult.logprobs;
          resultProviderMetadata = generateResult.providerMetadata;
+         request = (_b = generateResult.request) != null ? _b : {};
          response = generateResult.responseData;
          break;
        }
@@ -2209,6 +2133,7 @@ async function generateObject({
        finishReason,
        usage: calculateLanguageModelUsage(usage),
        warnings,
+       request,
        response: {
          ...response,
          headers: rawResponse == null ? void 0 : rawResponse.headers
@@ -2227,6 +2152,7 @@ var DefaultGenerateObjectResult = class {
    this.warnings = options.warnings;
    this.experimental_providerMetadata = options.providerMetadata;
    this.response = options.response;
+   this.request = options.request;
    this.rawResponse = {
      headers: options.response.headers
    };
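
Note: generateObject (and, below, streamObject) now surface the provider's raw request metadata on the result. A sketch, assuming model is any configured provider model:

    import { generateObject, type LanguageModel } from "ai";
    import { z } from "zod";

    declare const model: LanguageModel; // assumption: a configured provider model

    const { object, request } = await generateObject({
      model,
      schema: z.object({ city: z.string() }),
      prompt: "Name a city.",
    });
    console.log(request.body); // raw request body, when the provider reports one
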
@@ -2358,7 +2284,7 @@ function writeToServerResponse({
  }
 
  // core/generate-object/stream-object.ts
- var originalGenerateId2 = createIdGenerator2({ prefix: "aiobj-", size: 24 });
+ var originalGenerateId2 = createIdGenerator2({ prefix: "aiobj", size: 24 });
  async function streamObject({
    model,
    schema: inputSchema,
@@ -2451,7 +2377,8 @@ async function streamObject({
        inputFormat: standardPrompt.type,
        prompt: await convertToLanguageModelPrompt({
          prompt: standardPrompt,
-         modelSupportsImageUrls: model.supportsImageUrls
+         modelSupportsImageUrls: model.supportsImageUrls,
+         modelSupportsUrl: model.supportsUrl
        }),
        providerMetadata,
        abortSignal,
@@ -2493,7 +2420,8 @@ async function streamObject({
        inputFormat: validatedPrompt.type,
        prompt: await convertToLanguageModelPrompt({
          prompt: validatedPrompt,
-         modelSupportsImageUrls: model.supportsImageUrls
+         modelSupportsImageUrls: model.supportsImageUrls,
+         modelSupportsUrl: model.supportsUrl
        }),
        providerMetadata,
        abortSignal,
@@ -2526,7 +2454,7 @@ async function streamObject({
      }
    }
    const {
-     result: { stream, warnings, rawResponse },
+     result: { stream, warnings, rawResponse, request },
      doStreamSpan,
      startTimestampMs
    } = await retry(
@@ -2572,6 +2500,7 @@ async function streamObject({
      stream: stream.pipeThrough(new TransformStream(transformer)),
      warnings,
      rawResponse,
+     request: request != null ? request : {},
      onFinish,
      rootSpan,
      doStreamSpan,
@@ -2590,6 +2519,7 @@ var DefaultStreamObjectResult = class {
    stream,
    warnings,
    rawResponse,
+   request,
    outputStrategy,
    onFinish,
    rootSpan,
@@ -2604,6 +2534,7 @@ var DefaultStreamObjectResult = class {
    this.warnings = warnings;
    this.rawResponse = rawResponse;
    this.outputStrategy = outputStrategy;
+   this.request = Promise.resolve(request);
    this.objectPromise = new DelayedPromise();
    const { resolve: resolveUsage, promise: usagePromise } = createResolvablePromise();
    this.usage = usagePromise;
@@ -2879,7 +2810,7 @@ import {
  } from "@ai-sdk/provider";
 
  // errors/invalid-tool-arguments-error.ts
- import { AISDKError as AISDKError7, getErrorMessage as getErrorMessage3 } from "@ai-sdk/provider";
+ import { AISDKError as AISDKError7, getErrorMessage as getErrorMessage2 } from "@ai-sdk/provider";
  var name7 = "AI_InvalidToolArgumentsError";
  var marker7 = `vercel.ai.error.${name7}`;
  var symbol7 = Symbol.for(marker7);
@@ -2889,7 +2820,7 @@ var InvalidToolArgumentsError = class extends AISDKError7 {
    toolArgs,
    toolName,
    cause,
-   message = `Invalid arguments for tool ${toolName}: ${getErrorMessage3(
+   message = `Invalid arguments for tool ${toolName}: ${getErrorMessage2(
      cause
    )}`
  }) {
@@ -3009,12 +2940,30 @@ function prepareToolsAndToolChoice({
      ([name11]) => activeTools.includes(name11)
    ) : Object.entries(tools);
    return {
-     tools: filteredTools.map(([name11, tool2]) => ({
-       type: "function",
-       name: name11,
-       description: tool2.description,
-       parameters: asSchema2(tool2.parameters).jsonSchema
-     })),
+     tools: filteredTools.map(([name11, tool2]) => {
+       const toolType = tool2.type;
+       switch (toolType) {
+         case void 0:
+         case "function":
+           return {
+             type: "function",
+             name: name11,
+             description: tool2.description,
+             parameters: asSchema2(tool2.parameters).jsonSchema
+           };
+         case "provider-defined":
+           return {
+             type: "provider-defined",
+             name: name11,
+             id: tool2.id,
+             args: tool2.args
+           };
+         default: {
+           const exhaustiveCheck = toolType;
+           throw new Error(`Unsupported tool type: ${exhaustiveCheck}`);
+         }
+       }
+     }),
      toolChoice: toolChoice == null ? { type: "auto" } : typeof toolChoice === "string" ? { type: toolChoice } : { type: "tool", toolName: toolChoice.toolName }
    };
  }
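
Note: prepareToolsAndToolChoice no longer assumes every tool is a function tool; provider-defined tools are forwarded with their id and args. A hypothetical example (the id and args values are provider-specific assumptions):

    import { z } from "zod";

    const tools = {
      computer: {
        type: "provider-defined" as const,
        id: "provider.computer",        // assumption: provider-specific tool id
        args: { displayWidthPx: 1280 }, // assumption: provider-specific args
        parameters: z.object({ action: z.string() }),
      },
    };
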
@@ -3070,6 +3019,7 @@ function parseToolCall({
  // core/generate-text/to-response-messages.ts
  function toResponseMessages({
    text = "",
+   tools,
    toolCalls,
    toolResults
  }) {
@@ -3081,19 +3031,30 @@ function toResponseMessages({
    if (toolResults.length > 0) {
      responseMessages.push({
        role: "tool",
-       content: toolResults.map((result) => ({
-         type: "tool-result",
-         toolCallId: result.toolCallId,
-         toolName: result.toolName,
-         result: result.result
-       }))
+       content: toolResults.map((toolResult) => {
+         const tool2 = tools[toolResult.toolName];
+         return (tool2 == null ? void 0 : tool2.experimental_toToolResultContent) != null ? {
+           type: "tool-result",
+           toolCallId: toolResult.toolCallId,
+           toolName: toolResult.toolName,
+           result: tool2.experimental_toToolResultContent(toolResult.result),
+           experimental_content: tool2.experimental_toToolResultContent(
+             toolResult.result
+           )
+         } : {
+           type: "tool-result",
+           toolCallId: toolResult.toolCallId,
+           toolName: toolResult.toolName,
+           result: toolResult.result
+         };
+       })
      });
    }
    return responseMessages;
  }
 
  // core/generate-text/generate-text.ts
- var originalGenerateId3 = createIdGenerator3({ prefix: "aitxt-", size: 24 });
+ var originalGenerateId3 = createIdGenerator3({ prefix: "aitxt", size: 24 });
  async function generateText({
    model,
    tools,
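
Note: toResponseMessages now receives the tool map so a tool can convert its raw result into rich tool-result content via experimental_toToolResultContent. A sketch (the takeScreenshot helper is hypothetical):

    import { tool } from "ai";
    import { z } from "zod";

    declare function takeScreenshot(): Promise<string>; // hypothetical: returns a base64 PNG

    const screenshot = tool({
      description: "Take a screenshot",
      parameters: z.object({}),
      execute: async () => takeScreenshot(),
      // converts the raw result into rich tool-result content:
      experimental_toToolResultContent: (result) => [
        { type: "image", data: result, mimeType: "image/png" },
      ],
    });
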
@@ -3132,6 +3093,7 @@ async function generateText({
    headers,
    settings: { ...settings, maxRetries }
  });
+ const initialPrompt = standardizePrompt({ system, prompt, messages });
  const tracer = getTracer(telemetry);
  return recordSpan({
    name: "ai.generateText",
@@ -3152,9 +3114,8 @@ async function generateText({
    }),
    tracer,
    fn: async (span) => {
-     var _a11, _b, _c, _d, _e;
+     var _a11, _b, _c, _d, _e, _f, _g;
      const retry = retryWithExponentialBackoff({ maxRetries });
-     const currentPrompt = standardizePrompt({ system, prompt, messages });
      const mode = {
        type: "regular",
        ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
@@ -3175,11 +3136,17 @@ async function generateText({
      let stepType = "initial";
      do {
        if (stepCount === 1) {
-         currentPrompt.type = "messages";
+         initialPrompt.type = "messages";
        }
+       const promptFormat = stepCount === 0 ? initialPrompt.type : "messages";
        const promptMessages = await convertToLanguageModelPrompt({
-         prompt: currentPrompt,
-         modelSupportsImageUrls: model.supportsImageUrls
+         prompt: {
+           type: promptFormat,
+           system: initialPrompt.system,
+           messages: [...initialPrompt.messages, ...responseMessages]
+         },
+         modelSupportsImageUrls: model.supportsImageUrls,
+         modelSupportsUrl: model.supportsUrl
        });
        currentModelResponse = await retry(
          () => recordSpan({
@@ -3192,7 +3159,7 @@ async function generateText({
              telemetry
            }),
            ...baseTelemetryAttributes,
-           "ai.prompt.format": { input: () => currentPrompt.type },
+           "ai.prompt.format": { input: () => promptFormat },
            "ai.prompt.messages": {
              input: () => JSON.stringify(promptMessages)
            },
@@ -3210,11 +3177,11 @@ async function generateText({
            }),
            tracer,
            fn: async (span2) => {
-             var _a12, _b2, _c2, _d2, _e2, _f;
+             var _a12, _b2, _c2, _d2, _e2, _f2;
              const result = await model.doGenerate({
                mode,
                ...callSettings,
-               inputFormat: currentPrompt.type,
+               inputFormat: promptFormat,
                prompt: promptMessages,
                providerMetadata,
                abortSignal,
@@ -3223,7 +3190,7 @@ async function generateText({
              const responseData = {
                id: (_b2 = (_a12 = result.response) == null ? void 0 : _a12.id) != null ? _b2 : generateId3(),
                timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-               modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : model.modelId
+               modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : model.modelId
              };
              span2.setAttributes(
                selectTelemetryAttributes({
@@ -3293,7 +3260,27 @@ async function generateText({
        }
        const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace((_b = currentModelResponse.text) != null ? _b : "") : (_c = currentModelResponse.text) != null ? _c : "";
        text = nextStepType === "continue" || stepType === "continue" ? text + stepText : stepText;
-       const currentStep = {
+       if (stepType === "continue") {
+         const lastMessage = responseMessages[responseMessages.length - 1];
+         if (typeof lastMessage.content === "string") {
+           lastMessage.content = text;
+         } else {
+           lastMessage.content.push({
+             text: stepText,
+             type: "text"
+           });
+         }
+       } else {
+         responseMessages.push(
+           ...toResponseMessages({
+             text,
+             tools: tools != null ? tools : {},
+             toolCalls: currentToolCalls,
+             toolResults: currentToolResults
+           })
+         );
+       }
+       const currentStepResult = {
          stepType,
          text: stepText,
          toolCalls: currentToolCalls,
@@ -3302,36 +3289,18 @@ async function generateText({
          usage: currentUsage,
          warnings: currentModelResponse.warnings,
          logprobs: currentModelResponse.logprobs,
+         request: (_d = currentModelResponse.request) != null ? _d : {},
          response: {
            ...currentModelResponse.response,
-           headers: (_d = currentModelResponse.rawResponse) == null ? void 0 : _d.headers
+           headers: (_e = currentModelResponse.rawResponse) == null ? void 0 : _e.headers,
+           // deep clone msgs to avoid mutating past messages in multi-step:
+           messages: JSON.parse(JSON.stringify(responseMessages))
          },
          experimental_providerMetadata: currentModelResponse.providerMetadata,
          isContinued: nextStepType === "continue"
        };
-       steps.push(currentStep);
-       await (onStepFinish == null ? void 0 : onStepFinish(currentStep));
-       if (stepType === "continue") {
-         const lastMessage = currentPrompt.messages[currentPrompt.messages.length - 1];
-         if (typeof lastMessage.content === "string") {
-           lastMessage.content = text;
-         } else {
-           lastMessage.content.push({
-             text: stepText,
-             type: "text"
-           });
-         }
-         responseMessages[responseMessages.length - 1] = lastMessage;
-         currentPrompt.messages[currentPrompt.messages.length - 1] = lastMessage;
-       } else {
-         const newResponseMessages = toResponseMessages({
-           text,
-           toolCalls: currentToolCalls,
-           toolResults: currentToolResults
-         });
-         responseMessages.push(...newResponseMessages);
-         currentPrompt.messages.push(...newResponseMessages);
-       }
+       steps.push(currentStepResult);
+       await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
        stepType = nextStepType;
      } while (stepType !== "done");
      span.setAttributes(
@@ -3365,9 +3334,11 @@ async function generateText({
      finishReason: currentModelResponse.finishReason,
      usage,
      warnings: currentModelResponse.warnings,
+     request: (_f = currentModelResponse.request) != null ? _f : {},
      response: {
        ...currentModelResponse.response,
-       headers: (_e = currentModelResponse.rawResponse) == null ? void 0 : _e.headers
+       headers: (_g = currentModelResponse.rawResponse) == null ? void 0 : _g.headers,
+       messages: responseMessages
      },
      logprobs: currentModelResponse.logprobs,
      responseMessages,
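
Note: generateText results (and each step result) now expose response.messages, the assistant and tool messages produced so far, so multi-turn callers can append them to their history instead of rebuilding it. Sketch, assuming model and history already exist:

    import { generateText, type CoreMessage, type LanguageModel } from "ai";

    declare const model: LanguageModel;   // assumption: a configured provider model
    declare const history: CoreMessage[]; // assumption: your running chat history

    const result = await generateText({ model, messages: history, maxSteps: 5 });
    history.push(...result.response.messages); // append this turn's assistant/tool messages
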
@@ -3445,6 +3416,7 @@ var DefaultGenerateTextResult = class {
    this.finishReason = options.finishReason;
    this.usage = options.usage;
    this.warnings = options.warnings;
+   this.request = options.request;
    this.response = options.response;
    this.responseMessages = options.responseMessages;
    this.roundtrips = options.steps;
@@ -3802,7 +3774,7 @@ function runToolsTransformation({
  }
 
  // core/generate-text/stream-text.ts
- var originalGenerateId4 = createIdGenerator4({ prefix: "aitxt-", size: 24 });
+ var originalGenerateId4 = createIdGenerator4({ prefix: "aitxt", size: 24 });
  async function streamText({
    model,
    tools,
@@ -3844,6 +3816,7 @@ async function streamText({
    settings: { ...settings, maxRetries }
  });
  const tracer = getTracer(telemetry);
+ const initialPrompt = standardizePrompt({ system, prompt, messages });
  return recordSpan({
    name: "ai.streamText",
    attributes: selectTelemetryAttributes({
@@ -3863,14 +3836,20 @@ async function streamText({
    fn: async (rootSpan) => {
      const retry = retryWithExponentialBackoff({ maxRetries });
      const startStep = async ({
-       currentPrompt: currentPrompt2
+       responseMessages
      }) => {
+       const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
        const promptMessages = await convertToLanguageModelPrompt({
-         prompt: currentPrompt2,
-         modelSupportsImageUrls: model.supportsImageUrls
+         prompt: {
+           type: promptFormat,
+           system: initialPrompt.system,
+           messages: [...initialPrompt.messages, ...responseMessages]
+         },
+         modelSupportsImageUrls: model.supportsImageUrls,
+         modelSupportsUrl: model.supportsUrl
        });
        const {
-         result: { stream: stream2, warnings: warnings2, rawResponse: rawResponse2 },
+         result: { stream: stream2, warnings: warnings2, rawResponse: rawResponse2, request: request2 },
          doStreamSpan: doStreamSpan2,
          startTimestampMs: startTimestampMs2
        } = await retry(
@@ -3885,7 +3864,7 @@ async function streamText({
            }),
            ...baseTelemetryAttributes,
            "ai.prompt.format": {
-             input: () => currentPrompt2.type
+             input: () => promptFormat
            },
            "ai.prompt.messages": {
              input: () => JSON.stringify(promptMessages)
@@ -3918,7 +3897,7 @@ async function streamText({
              })
            },
            ...prepareCallSettings(settings),
-           inputFormat: currentPrompt2.type,
+           inputFormat: promptFormat,
            prompt: promptMessages,
            providerMetadata,
            abortSignal,
@@ -3938,22 +3917,23 @@ async function streamText({
              abortSignal
            }),
            warnings: warnings2,
+           request: request2 != null ? request2 : {},
            rawResponse: rawResponse2
          },
          doStreamSpan: doStreamSpan2,
          startTimestampMs: startTimestampMs2
        };
      };
-     const currentPrompt = standardizePrompt({ system, prompt, messages });
      const {
-       result: { stream, warnings, rawResponse },
+       result: { stream, warnings, rawResponse, request },
        doStreamSpan,
        startTimestampMs
-     } = await startStep({ currentPrompt });
+     } = await startStep({ responseMessages: [] });
      return new DefaultStreamTextResult({
        stream,
        warnings,
        rawResponse,
+       request,
        onChunk,
        onFinish,
        onStepFinish,
@@ -3964,11 +3944,11 @@ async function streamText({
        maxSteps,
        continueSteps,
        startStep,
-       currentPrompt,
        modelId: model.modelId,
        now: now2,
        currentDate,
-       generateId: generateId3
+       generateId: generateId3,
+       tools
      });
    }
  });
@@ -3978,6 +3958,7 @@ var DefaultStreamTextResult = class {
    stream,
    warnings,
    rawResponse,
+   request,
    onChunk,
    onFinish,
    onStepFinish,
@@ -3988,11 +3969,11 @@ var DefaultStreamTextResult = class {
    maxSteps,
    continueSteps,
    startStep,
-   currentPrompt,
    modelId,
    now: now2,
    currentDate,
-   generateId: generateId3
+   generateId: generateId3,
+   tools
  }) {
    this.warnings = warnings;
    this.rawResponse = rawResponse;
@@ -4013,6 +3994,8 @@ var DefaultStreamTextResult = class {
      promise: providerMetadataPromise
    } = createResolvablePromise();
    this.experimental_providerMetadata = providerMetadataPromise;
+   const { resolve: resolveRequest, promise: requestPromise } = createResolvablePromise();
+   this.request = requestPromise;
    const { resolve: resolveResponse, promise: responsePromise } = createResolvablePromise();
    this.response = responsePromise;
    const {
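
Note: DefaultStreamTextResult gains a request promise; judging from the resolveRequest(stepRequest) call in the final flush further down, it resolves with the last step's request metadata once the stream has finished. Sketch, assuming model is any configured provider model:

    import { streamText, type LanguageModel } from "ai";

    declare const model: LanguageModel; // assumption: a configured provider model

    const result = await streamText({ model, prompt: "Hello" });
    for await (const chunk of result.textStream) process.stdout.write(chunk);
    console.log((await result.request).body); // raw request body, when the provider reports one
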
@@ -4033,14 +4016,15 @@ var DefaultStreamTextResult = class {
      startTimestamp,
      doStreamSpan: doStreamSpan2,
      currentStep,
-     currentPrompt: currentPrompt2,
+     responseMessages,
      usage = {
        promptTokens: 0,
        completionTokens: 0,
        totalTokens: 0
      },
      stepType,
-     previousStepText = ""
+     previousStepText = "",
+     stepRequest
    }) {
      const stepToolCalls = [];
      const stepToolResults = [];
@@ -4167,6 +4151,7 @@ var DefaultStreamTextResult = class {
        },
        // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
        async flush(controller) {
+         var _a11;
          const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
          let nextStepType = "done";
          if (currentStep + 1 < maxSteps) {
@@ -4232,10 +4217,32 @@ var DefaultStreamTextResult = class {
            usage: stepUsage,
            experimental_providerMetadata: stepProviderMetadata,
            logprobs: stepLogProbs,
-           response: stepResponse,
+           response: {
+             ...stepResponse
+           },
            isContinued: nextStepType === "continue"
          });
-         const stepResult = {
+         if (stepType === "continue") {
+           const lastMessage = responseMessages[responseMessages.length - 1];
+           if (typeof lastMessage.content === "string") {
+             lastMessage.content = stepText;
+           } else {
+             lastMessage.content.push({
+               text: stepText,
+               type: "text"
+             });
+           }
+         } else {
+           responseMessages.push(
+             ...toResponseMessages({
+               text: stepText,
+               tools: tools != null ? tools : {},
+               toolCalls: stepToolCalls,
+               toolResults: stepToolResults
+             })
+           );
+         }
+         const currentStepResult = {
            stepType,
            text: stepText,
            toolCalls: stepToolCalls,
@@ -4244,49 +4251,30 @@ var DefaultStreamTextResult = class {
            usage: stepUsage,
            warnings: self.warnings,
            logprobs: stepLogProbs,
-           response: stepResponse,
+           request: stepRequest,
            rawResponse: self.rawResponse,
+           response: {
+             ...stepResponse,
+             headers: (_a11 = self.rawResponse) == null ? void 0 : _a11.headers,
+             // deep clone msgs to avoid mutating past messages in multi-step:
+             messages: JSON.parse(JSON.stringify(responseMessages))
+           },
            experimental_providerMetadata: stepProviderMetadata,
            isContinued: nextStepType === "continue"
          };
-         stepResults.push(stepResult);
-         await (onStepFinish == null ? void 0 : onStepFinish(stepResult));
+         stepResults.push(currentStepResult);
+         await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
          const combinedUsage = {
            promptTokens: usage.promptTokens + stepUsage.promptTokens,
            completionTokens: usage.completionTokens + stepUsage.completionTokens,
            totalTokens: usage.totalTokens + stepUsage.totalTokens
          };
          if (nextStepType !== "done") {
-           if (stepType === "continue") {
-             const lastMessage = currentPrompt2.messages[currentPrompt2.messages.length - 1];
-             if (typeof lastMessage.content === "string") {
-               lastMessage.content = stepText;
-             } else {
-               lastMessage.content.push({
-                 text: stepText,
-                 type: "text"
-               });
-             }
-             currentPrompt2.messages[currentPrompt2.messages.length - 1] = lastMessage;
-           } else {
-             const newResponseMessages = toResponseMessages({
-               text: stepText,
-               toolCalls: stepToolCalls,
-               toolResults: stepToolResults
-             });
-             currentPrompt2.messages.push(...newResponseMessages);
-           }
            const {
              result,
              doStreamSpan: doStreamSpan3,
              startTimestampMs: startTimestamp2
-           } = await startStep({
-             currentPrompt: {
-               type: "messages",
-               system: currentPrompt2.system,
-               messages: currentPrompt2.messages
-             }
-           });
+           } = await startStep({ responseMessages });
            self.warnings = result.warnings;
            self.rawResponse = result.rawResponse;
            addStepStream({
@@ -4294,10 +4282,11 @@ var DefaultStreamTextResult = class {
              startTimestamp: startTimestamp2,
              doStreamSpan: doStreamSpan3,
              currentStep: currentStep + 1,
-             currentPrompt: currentPrompt2,
+             responseMessages,
              usage: combinedUsage,
              stepType: nextStepType,
-             previousStepText: fullStepText
+             previousStepText: fullStepText,
+             stepRequest: result.request
            });
            return;
          }
@@ -4308,7 +4297,9 @@ var DefaultStreamTextResult = class {
            usage: combinedUsage,
            experimental_providerMetadata: stepProviderMetadata,
            logprobs: stepLogProbs,
-           response: stepResponse
+           response: {
+             ...stepResponse
+           }
          });
          closeStitchableStream();
          rootSpan.setAttributes(
@@ -4331,34 +4322,13 @@ var DefaultStreamTextResult = class {
            }
          })
          );
-         const responseMessages = stepResults.reduce((responseMessages2, step) => {
-           if (step.stepType === "continue") {
-             const lastResponseMessage = responseMessages2.pop();
-             if (typeof lastResponseMessage.content === "string") {
-               lastResponseMessage.content += step.text;
-             } else {
-               lastResponseMessage.content.push({
-                 text: step.text,
-                 type: "text"
-               });
-             }
-             return [...responseMessages2, lastResponseMessage];
-           }
-           return [
-             ...responseMessages2,
-             ...toResponseMessages({
-               text: step.text,
-               toolCalls: step.toolCalls,
-               toolResults: step.toolResults
-             })
-           ];
-         }, []);
          resolveUsage(combinedUsage);
          resolveFinishReason(stepFinishReason);
          resolveText(fullStepText);
          resolveToolCalls(stepToolCalls);
          resolveProviderMetadata(stepProviderMetadata);
          resolveToolResults(stepToolResults);
+         resolveRequest(stepRequest);
          resolveResponse({
            ...stepResponse,
            headers: rawResponse == null ? void 0 : rawResponse.headers
@@ -4376,10 +4346,12 @@ var DefaultStreamTextResult = class {
            // optional as well. Therefore we need to cast the toolResults to any.
            // The type exposed to the users will be correctly inferred.
            toolResults: stepToolResults,
+           request: stepRequest,
            rawResponse,
            response: {
              ...stepResponse,
-             headers: rawResponse == null ? void 0 : rawResponse.headers
+             headers: rawResponse == null ? void 0 : rawResponse.headers,
+             messages: responseMessages
            },
            warnings,
            experimental_providerMetadata: stepProviderMetadata,
@@ -4401,9 +4373,10 @@ var DefaultStreamTextResult = class {
      startTimestamp: startTimestampMs,
      doStreamSpan,
      currentStep: 0,
-     currentPrompt,
+     responseMessages: [],
      usage: void 0,
-     stepType: "initial"
+     stepType: "initial",
+     stepRequest: request
    });
  }
  /**
@@ -4442,7 +4415,7 @@ var DefaultStreamTextResult = class {
  }
  toDataStreamInternal({
    callbacks = {},
-   getErrorMessage: getErrorMessage4 = () => "",
+   getErrorMessage: getErrorMessage3 = () => "",
    // mask error messages for safety by default
    sendUsage = true
  } = {}) {
@@ -4517,7 +4490,7 @@ var DefaultStreamTextResult = class {
          }
          case "error": {
            controller.enqueue(
-             formatStreamPart("error", getErrorMessage4(chunk.error))
+             formatStreamPart("error", getErrorMessage3(chunk.error))
            );
            break;
          }
@@ -4565,7 +4538,7 @@ var DefaultStreamTextResult = class {
      statusText: "statusText" in options ? options.statusText : void 0
    };
    const data = options == null ? void 0 : "data" in options ? options.data : void 0;
-   const getErrorMessage4 = options == null ? void 0 : "getErrorMessage" in options ? options.getErrorMessage : void 0;
+   const getErrorMessage3 = options == null ? void 0 : "getErrorMessage" in options ? options.getErrorMessage : void 0;
    const sendUsage = options == null ? void 0 : "sendUsage" in options ? options.sendUsage : void 0;
    writeToServerResponse({
      response,
@@ -4575,7 +4548,7 @@ var DefaultStreamTextResult = class {
        contentType: "text/plain; charset=utf-8",
        dataStreamVersion: "v1"
      }),
-     stream: this.toDataStream({ data, getErrorMessage: getErrorMessage4, sendUsage })
+     stream: this.toDataStream({ data, getErrorMessage: getErrorMessage3, sendUsage })
    });
  }
  pipeTextStreamToResponse(response, init) {
@@ -4607,10 +4580,10 @@ var DefaultStreamTextResult = class {
      statusText: "statusText" in options ? options.statusText : void 0
    };
    const data = options == null ? void 0 : "data" in options ? options.data : void 0;
-   const getErrorMessage4 = options == null ? void 0 : "getErrorMessage" in options ? options.getErrorMessage : void 0;
+   const getErrorMessage3 = options == null ? void 0 : "getErrorMessage" in options ? options.getErrorMessage : void 0;
    const sendUsage = options == null ? void 0 : "sendUsage" in options ? options.sendUsage : void 0;
    return new Response(
-     this.toDataStream({ data, getErrorMessage: getErrorMessage4, sendUsage }),
+     this.toDataStream({ data, getErrorMessage: getErrorMessage3, sendUsage }),
      {
        status: (_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200,
        statusText: init == null ? void 0 : init.statusText,
@@ -4652,6 +4625,7 @@ var experimental_wrapLanguageModel = ({
    modelId: modelId != null ? modelId : model.modelId,
    defaultObjectGenerationMode: model.defaultObjectGenerationMode,
    supportsImageUrls: model.supportsImageUrls,
+   supportsUrl: model.supportsUrl,
    supportsStructuredOutputs: model.supportsStructuredOutputs,
    async doGenerate(params) {
      const transformedParams = await doTransform({ params, type: "generate" });
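
Note: experimental_wrapLanguageModel now carries supportsUrl over to the wrapped model, so adding middleware no longer silently re-enables downloading of URLs the underlying model could handle itself. Sketch (the middleware is an assumption):

    import { experimental_wrapLanguageModel, type Experimental_LanguageModelV1Middleware } from "ai";

    declare const myMiddleware: Experimental_LanguageModelV1Middleware; // assumption: an existing middleware

    const wrapped = experimental_wrapLanguageModel({
      model: modelWithUrlSupport, // e.g. the sketch near the top of this diff
      middleware: myMiddleware,
    });
    // wrapped.supportsUrl is the same function as modelWithUrlSupport.supportsUrl
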
@@ -4775,7 +4749,7 @@ function convertToCoreMessages(messages) {
          role: "assistant",
          content: [
            { type: "text", text: content },
-           ...toolInvocations.filter((invocation) => invocation.state !== "partial-call").map(({ toolCallId, toolName, args }) => ({
+           ...toolInvocations.map(({ toolCallId, toolName, args }) => ({
              type: "tool-call",
              toolCallId,
              toolName,
@@ -4783,19 +4757,25 @@ function convertToCoreMessages(messages) {
            }))
          ]
        });
-       const toolResults = toolInvocations.filter((invocation) => invocation.state === "result").map(({ toolCallId, toolName, args, result }) => ({
-         type: "tool-result",
-         toolCallId,
-         toolName,
-         args,
-         result
-       }));
-       if (toolResults.length > 0) {
-         coreMessages.push({
-           role: "tool",
-           content: toolResults
-         });
-       }
+       coreMessages.push({
+         role: "tool",
+         content: toolInvocations.map((ToolInvocation) => {
+           if (!("result" in ToolInvocation)) {
+             throw new MessageConversionError({
+               originalMessage: message,
+               message: "ToolInvocation must have a result: " + JSON.stringify(ToolInvocation)
+             });
+           }
+           const { toolCallId, toolName, args, result } = ToolInvocation;
+           return {
+             type: "tool-result",
+             toolCallId,
+             toolName,
+             args,
+             result
+           };
+         })
+       });
        break;
      }
      case "function":