ai 3.4.18 → 3.4.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -593,12 +593,6 @@ var DefaultEmbedManyResult = class {
  // core/generate-object/generate-object.ts
  import { createIdGenerator, safeParseJSON } from "@ai-sdk/provider-utils";

- // core/prompt/convert-to-language-model-prompt.ts
- import {
-   convertUint8ArrayToBase64 as convertUint8ArrayToBase642,
-   getErrorMessage as getErrorMessage2
- } from "@ai-sdk/provider-utils";
-
  // util/download-error.ts
  import { AISDKError as AISDKError2 } from "@ai-sdk/provider";
  var name2 = "AI_DownloadError";
@@ -844,19 +838,21 @@ function splitDataUrl(dataUrl) {
  async function convertToLanguageModelPrompt({
    prompt,
    modelSupportsImageUrls = true,
+   modelSupportsUrl = () => false,
    downloadImplementation = download
  }) {
-   const downloadedAssets = modelSupportsImageUrls || prompt.messages == null ? null : await downloadAssets(prompt.messages, downloadImplementation);
-   const languageModelMessages = [];
-   if (prompt.system != null) {
-     languageModelMessages.push({ role: "system", content: prompt.system });
-   }
-   languageModelMessages.push(
+   const downloadedAssets = await downloadAssets(
+     prompt.messages,
+     downloadImplementation,
+     modelSupportsImageUrls,
+     modelSupportsUrl
+   );
+   return [
+     ...prompt.system != null ? [{ role: "system", content: prompt.system }] : [],
      ...prompt.messages.map(
        (message) => convertToLanguageModelMessage(message, downloadedAssets)
      )
-   );
-   return languageModelMessages;
+   ];
  }
  function convertToLanguageModelMessage(message, downloadedAssets) {
    const role = message.role;
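
The hunk above threads a new `modelSupportsUrl` option through prompt conversion: assets are only downloaded when neither `modelSupportsImageUrls` nor `modelSupportsUrl(url)` claims native URL support. A hedged sketch of how a provider might opt in (the model object and hostname are illustrative, not from this diff):

    // sketch, assuming a LanguageModelV1-style implementation; `baseModel` is hypothetical
    const model = {
      ...baseModel,
      supportsImageUrls: false,
      // return true to pass this URL through to the provider instead of downloading it:
      supportsUrl(url) {
        return url.protocol === "https:" && url.hostname === "assets.example.com";
      }
    };
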
@@ -878,178 +874,7 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
    }
    return {
      role: "user",
-     content: message.content.map(
-       (part) => {
-         var _a11, _b, _c, _d, _e;
-         switch (part.type) {
-           case "text": {
-             return {
-               type: "text",
-               text: part.text,
-               providerMetadata: part.experimental_providerMetadata
-             };
-           }
-           case "image": {
-             if (part.image instanceof URL) {
-               if (downloadedAssets == null) {
-                 return {
-                   type: "image",
-                   image: part.image,
-                   mimeType: part.mimeType,
-                   providerMetadata: part.experimental_providerMetadata
-                 };
-               } else {
-                 const downloadedImage = downloadedAssets[part.image.toString()];
-                 return {
-                   type: "image",
-                   image: downloadedImage.data,
-                   mimeType: (_a11 = part.mimeType) != null ? _a11 : downloadedImage.mimeType,
-                   providerMetadata: part.experimental_providerMetadata
-                 };
-               }
-             }
-             if (typeof part.image === "string") {
-               try {
-                 const url = new URL(part.image);
-                 switch (url.protocol) {
-                   case "http:":
-                   case "https:": {
-                     if (downloadedAssets == null) {
-                       return {
-                         type: "image",
-                         image: url,
-                         mimeType: part.mimeType,
-                         providerMetadata: part.experimental_providerMetadata
-                       };
-                     } else {
-                       const downloadedImage = downloadedAssets[url.toString()];
-                       return {
-                         type: "image",
-                         image: downloadedImage.data,
-                         mimeType: (_b = part.mimeType) != null ? _b : downloadedImage.mimeType,
-                         providerMetadata: part.experimental_providerMetadata
-                       };
-                     }
-                   }
-                   case "data:": {
-                     try {
-                       const { mimeType, base64Content } = splitDataUrl(
-                         part.image
-                       );
-                       if (mimeType == null || base64Content == null) {
-                         throw new Error("Invalid data URL format");
-                       }
-                       return {
-                         type: "image",
-                         image: convertDataContentToUint8Array(base64Content),
-                         mimeType,
-                         providerMetadata: part.experimental_providerMetadata
-                       };
-                     } catch (error) {
-                       throw new Error(
-                         `Error processing data URL: ${getErrorMessage2(
-                           message
-                         )}`
-                       );
-                     }
-                   }
-                 }
-               } catch (_ignored) {
-               }
-             }
-             const imageUint8 = convertDataContentToUint8Array(part.image);
-             return {
-               type: "image",
-               image: imageUint8,
-               mimeType: (_c = part.mimeType) != null ? _c : detectImageMimeType(imageUint8),
-               providerMetadata: part.experimental_providerMetadata
-             };
-           }
-           case "file": {
-             if (part.data instanceof URL) {
-               if (downloadedAssets == null) {
-                 return {
-                   type: "file",
-                   data: part.data,
-                   mimeType: part.mimeType,
-                   providerMetadata: part.experimental_providerMetadata
-                 };
-               } else {
-                 const downloadedImage = downloadedAssets[part.data.toString()];
-                 return {
-                   type: "file",
-                   data: convertUint8ArrayToBase642(downloadedImage.data),
-                   mimeType: (_d = part.mimeType) != null ? _d : downloadedImage.mimeType,
-                   providerMetadata: part.experimental_providerMetadata
-                 };
-               }
-             }
-             if (typeof part.data === "string") {
-               try {
-                 const url = new URL(part.data);
-                 switch (url.protocol) {
-                   case "http:":
-                   case "https:": {
-                     if (downloadedAssets == null) {
-                       return {
-                         type: "file",
-                         data: url,
-                         mimeType: part.mimeType,
-                         providerMetadata: part.experimental_providerMetadata
-                       };
-                     } else {
-                       const downloadedImage = downloadedAssets[url.toString()];
-                       return {
-                         type: "file",
-                         data: convertUint8ArrayToBase642(
-                           downloadedImage.data
-                         ),
-                         mimeType: (_e = part.mimeType) != null ? _e : downloadedImage.mimeType,
-                         providerMetadata: part.experimental_providerMetadata
-                       };
-                     }
-                   }
-                   case "data:": {
-                     try {
-                       const { mimeType, base64Content } = splitDataUrl(
-                         part.data
-                       );
-                       if (mimeType == null || base64Content == null) {
-                         throw new Error("Invalid data URL format");
-                       }
-                       return {
-                         type: "file",
-                         data: convertDataContentToBase64String(
-                           base64Content
-                         ),
-                         mimeType,
-                         providerMetadata: part.experimental_providerMetadata
-                       };
-                     } catch (error) {
-                       throw new Error(
-                         `Error processing data URL: ${getErrorMessage2(
-                           message
-                         )}`
-                       );
-                     }
-                   }
-                 }
-               } catch (_ignored) {
-               }
-             }
-             const imageBase64 = convertDataContentToBase64String(
-               part.data
-             );
-             return {
-               type: "file",
-               data: imageBase64,
-               mimeType: part.mimeType,
-               providerMetadata: part.experimental_providerMetadata
-             };
-           }
-         }
-       }
-     ).filter((part) => part.type !== "text" || part.text !== ""),
+     content: message.content.map((part) => convertPartToLanguageModelPart(part, downloadedAssets)).filter((part) => part.type !== "text" || part.text !== ""),
      providerMetadata: message.experimental_providerMetadata
    };
  }
@@ -1084,6 +909,8 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
        toolCallId: part.toolCallId,
        toolName: part.toolName,
        result: part.result,
+       content: part.experimental_content,
+       isError: part.isError,
        providerMetadata: part.experimental_providerMetadata
      })),
      providerMetadata: message.experimental_providerMetadata
@@ -1095,17 +922,19 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
      }
    }
  }
- async function downloadAssets(messages, downloadImplementation) {
+ async function downloadAssets(messages, downloadImplementation, modelSupportsImageUrls, modelSupportsUrl) {
    const urls = messages.filter((message) => message.role === "user").map((message) => message.content).filter(
      (content) => Array.isArray(content)
    ).flat().filter(
      (part) => part.type === "image" || part.type === "file"
+   ).filter(
+     (part) => !(part.type === "image" && modelSupportsImageUrls === true)
    ).map((part) => part.type === "image" ? part.image : part.data).map(
      (part) => (
        // support string urls:
        typeof part === "string" && (part.startsWith("http:") || part.startsWith("https:")) ? new URL(part) : part
      )
-   ).filter((image) => image instanceof URL);
+   ).filter((image) => image instanceof URL).filter((url) => !modelSupportsUrl(url));
    const downloadedImages = await Promise.all(
      urls.map(async (url) => ({
        url,
@@ -1116,6 +945,79 @@ async function downloadAssets(messages, downloadImplementation) {
      downloadedImages.map(({ url, data }) => [url.toString(), data])
    );
  }
+ function convertPartToLanguageModelPart(part, downloadedAssets) {
+   if (part.type === "text") {
+     return {
+       type: "text",
+       text: part.text,
+       providerMetadata: part.experimental_providerMetadata
+     };
+   }
+   let mimeType = part.mimeType;
+   let data;
+   let content;
+   let normalizedData;
+   const type = part.type;
+   switch (type) {
+     case "image":
+       data = part.image;
+       break;
+     case "file":
+       data = part.data;
+       break;
+     default:
+       throw new Error(`Unsupported part type: ${type}`);
+   }
+   try {
+     content = typeof data === "string" ? new URL(data) : data;
+   } catch (error) {
+     content = data;
+   }
+   if (content instanceof URL) {
+     if (content.protocol === "data:") {
+       const { mimeType: dataUrlMimeType, base64Content } = splitDataUrl(
+         content.toString()
+       );
+       if (dataUrlMimeType == null || base64Content == null) {
+         throw new Error(`Invalid data URL format in part ${type}`);
+       }
+       mimeType = dataUrlMimeType;
+       normalizedData = convertDataContentToUint8Array(base64Content);
+     } else {
+       const downloadedFile = downloadedAssets[content.toString()];
+       if (downloadedFile) {
+         normalizedData = downloadedFile.data;
+         mimeType != null ? mimeType : mimeType = downloadedFile.mimeType;
+       } else {
+         normalizedData = content;
+       }
+     }
+   } else {
+     normalizedData = convertDataContentToUint8Array(content);
+   }
+   switch (type) {
+     case "image":
+       if (mimeType == null && normalizedData instanceof Uint8Array) {
+         mimeType = detectImageMimeType(normalizedData);
+       }
+       return {
+         type: "image",
+         image: normalizedData,
+         mimeType,
+         providerMetadata: part.experimental_providerMetadata
+       };
+     case "file":
+       if (mimeType == null) {
+         throw new Error(`Mime type is missing for file part`);
+       }
+       return {
+         type: "file",
+         data: normalizedData instanceof Uint8Array ? convertDataContentToBase64String(normalizedData) : normalizedData,
+         mimeType,
+         providerMetadata: part.experimental_providerMetadata
+       };
+   }
+ }

  // errors/invalid-argument-error.ts
  import { AISDKError as AISDKError5 } from "@ai-sdk/provider";
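
The new `convertPartToLanguageModelPart` centralizes the per-part logic that the earlier hunk deleted: try to parse the payload as a URL, inline `data:` URLs, substitute downloaded bytes where available, and otherwise pass the URL through. A hedged sketch of the three outcomes (inputs illustrative; the function is internal to the bundle):

    // 1. data: URL -> bytes plus the mime type parsed from the URL itself
    convertPartToLanguageModelPart(
      { type: "image", image: "data:image/png;base64,iVBORw0KGgo=" },
      {}
    );
    // 2. downloaded http(s) URL -> downloadedAssets[url].data, mime type detected if missing
    // 3. URL the model supports natively -> returned as a URL object, untouched
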
@@ -1272,10 +1174,10 @@ function prepareCallSettings({
  // core/prompt/standardize-prompt.ts
  import { InvalidPromptError } from "@ai-sdk/provider";
  import { safeValidateTypes } from "@ai-sdk/provider-utils";
- import { z as z6 } from "zod";
+ import { z as z7 } from "zod";

  // core/prompt/message.ts
- import { z as z5 } from "zod";
+ import { z as z6 } from "zod";

  // core/types/provider-metadata.ts
  import { z as z3 } from "zod";
@@ -1300,67 +1202,83 @@ var providerMetadataSchema = z3.record(
  );

  // core/prompt/content-part.ts
+ import { z as z5 } from "zod";
+
+ // core/prompt/tool-result-content.ts
  import { z as z4 } from "zod";
- var textPartSchema = z4.object({
-   type: z4.literal("text"),
-   text: z4.string(),
+ var toolResultContentSchema = z4.array(
+   z4.union([
+     z4.object({ type: z4.literal("text"), text: z4.string() }),
+     z4.object({
+       type: z4.literal("image"),
+       data: z4.string(),
+       mimeType: z4.string().optional()
+     })
+   ])
+ );
+
+ // core/prompt/content-part.ts
+ var textPartSchema = z5.object({
+   type: z5.literal("text"),
+   text: z5.string(),
    experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var imagePartSchema = z4.object({
-   type: z4.literal("image"),
-   image: z4.union([dataContentSchema, z4.instanceof(URL)]),
-   mimeType: z4.string().optional(),
+ var imagePartSchema = z5.object({
+   type: z5.literal("image"),
+   image: z5.union([dataContentSchema, z5.instanceof(URL)]),
+   mimeType: z5.string().optional(),
    experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var filePartSchema = z4.object({
-   type: z4.literal("file"),
-   data: z4.union([dataContentSchema, z4.instanceof(URL)]),
-   mimeType: z4.string(),
+ var filePartSchema = z5.object({
+   type: z5.literal("file"),
+   data: z5.union([dataContentSchema, z5.instanceof(URL)]),
+   mimeType: z5.string(),
    experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var toolCallPartSchema = z4.object({
-   type: z4.literal("tool-call"),
-   toolCallId: z4.string(),
-   toolName: z4.string(),
-   args: z4.unknown()
+ var toolCallPartSchema = z5.object({
+   type: z5.literal("tool-call"),
+   toolCallId: z5.string(),
+   toolName: z5.string(),
+   args: z5.unknown()
  });
- var toolResultPartSchema = z4.object({
-   type: z4.literal("tool-result"),
-   toolCallId: z4.string(),
-   toolName: z4.string(),
-   result: z4.unknown(),
-   isError: z4.boolean().optional(),
+ var toolResultPartSchema = z5.object({
+   type: z5.literal("tool-result"),
+   toolCallId: z5.string(),
+   toolName: z5.string(),
+   result: z5.unknown(),
+   content: toolResultContentSchema.optional(),
+   isError: z5.boolean().optional(),
    experimental_providerMetadata: providerMetadataSchema.optional()
  });

  // core/prompt/message.ts
- var coreSystemMessageSchema = z5.object({
-   role: z5.literal("system"),
-   content: z5.string(),
+ var coreSystemMessageSchema = z6.object({
+   role: z6.literal("system"),
+   content: z6.string(),
    experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var coreUserMessageSchema = z5.object({
-   role: z5.literal("user"),
-   content: z5.union([
-     z5.string(),
-     z5.array(z5.union([textPartSchema, imagePartSchema, filePartSchema]))
+ var coreUserMessageSchema = z6.object({
+   role: z6.literal("user"),
+   content: z6.union([
+     z6.string(),
+     z6.array(z6.union([textPartSchema, imagePartSchema, filePartSchema]))
    ]),
    experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var coreAssistantMessageSchema = z5.object({
-   role: z5.literal("assistant"),
-   content: z5.union([
-     z5.string(),
-     z5.array(z5.union([textPartSchema, toolCallPartSchema]))
+ var coreAssistantMessageSchema = z6.object({
+   role: z6.literal("assistant"),
+   content: z6.union([
+     z6.string(),
+     z6.array(z6.union([textPartSchema, toolCallPartSchema]))
    ]),
    experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var coreToolMessageSchema = z5.object({
-   role: z5.literal("tool"),
-   content: z5.array(toolResultPartSchema),
+ var coreToolMessageSchema = z6.object({
+   role: z6.literal("tool"),
+   content: z6.array(toolResultPartSchema),
    experimental_providerMetadata: providerMetadataSchema.optional()
  });
- var coreMessageSchema = z5.union([
+ var coreMessageSchema = z6.union([
    coreSystemMessageSchema,
    coreUserMessageSchema,
    coreAssistantMessageSchema,
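
`toolResultPartSchema` now accepts an optional multimodal `content` array alongside `result`. A hedged example of a core tool message that validates against the new schema (values illustrative):

    const toolMessage = {
      role: "tool",
      content: [{
        type: "tool-result",
        toolCallId: "call-1",
        toolName: "screenshot",
        result: "captured",
        // new optional field: text and image parts for multimodal tool results
        content: [
          { type: "text", text: "captured" },
          { type: "image", data: "iVBORw0KGgo=", mimeType: "image/png" }
        ]
      }]
    };
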
@@ -1408,7 +1326,7 @@ function standardizePrompt(prompt) {
  if (prompt.messages != null) {
    const validationResult = safeValidateTypes({
      value: prompt.messages,
-     schema: z6.array(coreMessageSchema)
+     schema: z7.array(coreMessageSchema)
    });
    if (!validationResult.success) {
      throw new InvalidPromptError({
@@ -1873,7 +1791,7 @@ function validateObjectGenerationInput({
  }

  // core/generate-object/generate-object.ts
- var originalGenerateId = createIdGenerator({ prefix: "aiobj-", size: 24 });
+ var originalGenerateId = createIdGenerator({ prefix: "aiobj", size: 24 });
  async function generateObject({
    model,
    enum: enumValues,
@@ -1943,6 +1861,7 @@ async function generateObject({
    }),
    tracer,
    fn: async (span) => {
+     var _a11, _b;
      const retry = retryWithExponentialBackoff({ maxRetries });
      if (mode === "auto" || mode == null) {
        mode = model.defaultObjectGenerationMode;
@@ -1953,6 +1872,7 @@ async function generateObject({
      let warnings;
      let rawResponse;
      let response;
+     let request;
      let logprobs;
      let resultProviderMetadata;
      switch (mode) {
@@ -1967,7 +1887,8 @@ async function generateObject({
          });
          const promptMessages = await convertToLanguageModelPrompt({
            prompt: standardPrompt,
-           modelSupportsImageUrls: model.supportsImageUrls
+           modelSupportsImageUrls: model.supportsImageUrls,
+           modelSupportsUrl: model.supportsUrl
          });
          const generateResult = await retry(
            () => recordSpan({
@@ -2000,7 +1921,7 @@ async function generateObject({
            }),
            tracer,
            fn: async (span2) => {
-             var _a11, _b, _c, _d, _e, _f;
+             var _a12, _b2, _c, _d, _e, _f;
              const result2 = await model.doGenerate({
                mode: {
                  type: "object-json",
@@ -2019,7 +1940,7 @@ async function generateObject({
                throw new NoObjectGeneratedError();
              }
              const responseData = {
-               id: (_b = (_a11 = result2.response) == null ? void 0 : _a11.id) != null ? _b : generateId3(),
+               id: (_b2 = (_a12 = result2.response) == null ? void 0 : _a12.id) != null ? _b2 : generateId3(),
                timestamp: (_d = (_c = result2.response) == null ? void 0 : _c.timestamp) != null ? _d : currentDate(),
                modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId
              };
@@ -2057,6 +1978,7 @@ async function generateObject({
          rawResponse = generateResult.rawResponse;
          logprobs = generateResult.logprobs;
          resultProviderMetadata = generateResult.providerMetadata;
+         request = (_a11 = generateResult.request) != null ? _a11 : {};
          response = generateResult.responseData;
          break;
        }
@@ -2068,7 +1990,8 @@ async function generateObject({
        });
        const promptMessages = await convertToLanguageModelPrompt({
          prompt: validatedPrompt,
-         modelSupportsImageUrls: model.supportsImageUrls
+         modelSupportsImageUrls: model.supportsImageUrls,
+         modelSupportsUrl: model.supportsUrl
        });
        const inputFormat = validatedPrompt.type;
        const generateResult = await retry(
@@ -2102,7 +2025,7 @@ async function generateObject({
          }),
          tracer,
          fn: async (span2) => {
-           var _a11, _b, _c, _d, _e, _f, _g, _h;
+           var _a12, _b2, _c, _d, _e, _f, _g, _h;
            const result2 = await model.doGenerate({
              mode: {
                type: "object-tool",
@@ -2120,7 +2043,7 @@ async function generateObject({
              abortSignal,
              headers
            });
-           const objectText = (_b = (_a11 = result2.toolCalls) == null ? void 0 : _a11[0]) == null ? void 0 : _b.args;
+           const objectText = (_b2 = (_a12 = result2.toolCalls) == null ? void 0 : _a12[0]) == null ? void 0 : _b2.args;
            if (objectText === void 0) {
              throw new NoObjectGeneratedError();
            }
@@ -2163,6 +2086,7 @@ async function generateObject({
        rawResponse = generateResult.rawResponse;
        logprobs = generateResult.logprobs;
        resultProviderMetadata = generateResult.providerMetadata;
+       request = (_b = generateResult.request) != null ? _b : {};
        response = generateResult.responseData;
        break;
      }
@@ -2209,6 +2133,7 @@ async function generateObject({
      finishReason,
      usage: calculateLanguageModelUsage(usage),
      warnings,
+     request,
      response: {
        ...response,
        headers: rawResponse == null ? void 0 : rawResponse.headers
@@ -2227,6 +2152,7 @@ var DefaultGenerateObjectResult = class {
    this.warnings = options.warnings;
    this.experimental_providerMetadata = options.providerMetadata;
    this.response = options.response;
+   this.request = options.request;
    this.rawResponse = {
      headers: options.response.headers
    };
@@ -2358,7 +2284,7 @@ function writeToServerResponse({
  }

  // core/generate-object/stream-object.ts
- var originalGenerateId2 = createIdGenerator2({ prefix: "aiobj-", size: 24 });
+ var originalGenerateId2 = createIdGenerator2({ prefix: "aiobj", size: 24 });
  async function streamObject({
    model,
    schema: inputSchema,
@@ -2451,7 +2377,8 @@ async function streamObject({
        inputFormat: standardPrompt.type,
        prompt: await convertToLanguageModelPrompt({
          prompt: standardPrompt,
-         modelSupportsImageUrls: model.supportsImageUrls
+         modelSupportsImageUrls: model.supportsImageUrls,
+         modelSupportsUrl: model.supportsUrl
        }),
        providerMetadata,
        abortSignal,
@@ -2493,7 +2420,8 @@ async function streamObject({
        inputFormat: validatedPrompt.type,
        prompt: await convertToLanguageModelPrompt({
          prompt: validatedPrompt,
-         modelSupportsImageUrls: model.supportsImageUrls
+         modelSupportsImageUrls: model.supportsImageUrls,
+         modelSupportsUrl: model.supportsUrl
        }),
        providerMetadata,
        abortSignal,
@@ -2526,7 +2454,7 @@ async function streamObject({
    }
  }
  const {
-   result: { stream, warnings, rawResponse },
+   result: { stream, warnings, rawResponse, request },
    doStreamSpan,
    startTimestampMs
  } = await retry(
@@ -2572,6 +2500,7 @@ async function streamObject({
    stream: stream.pipeThrough(new TransformStream(transformer)),
    warnings,
    rawResponse,
+   request: request != null ? request : {},
    onFinish,
    rootSpan,
    doStreamSpan,
@@ -2590,6 +2519,7 @@ var DefaultStreamObjectResult = class {
    stream,
    warnings,
    rawResponse,
+   request,
    outputStrategy,
    onFinish,
    rootSpan,
@@ -2604,6 +2534,7 @@ var DefaultStreamObjectResult = class {
    this.warnings = warnings;
    this.rawResponse = rawResponse;
    this.outputStrategy = outputStrategy;
+   this.request = Promise.resolve(request);
    this.objectPromise = new DelayedPromise();
    const { resolve: resolveUsage, promise: usagePromise } = createResolvablePromise();
    this.usage = usagePromise;
@@ -2879,7 +2810,7 @@ import {
  } from "@ai-sdk/provider";

  // errors/invalid-tool-arguments-error.ts
- import { AISDKError as AISDKError7, getErrorMessage as getErrorMessage3 } from "@ai-sdk/provider";
+ import { AISDKError as AISDKError7, getErrorMessage as getErrorMessage2 } from "@ai-sdk/provider";
  var name7 = "AI_InvalidToolArgumentsError";
  var marker7 = `vercel.ai.error.${name7}`;
  var symbol7 = Symbol.for(marker7);
@@ -2889,7 +2820,7 @@ var InvalidToolArgumentsError = class extends AISDKError7 {
    toolArgs,
    toolName,
    cause,
-   message = `Invalid arguments for tool ${toolName}: ${getErrorMessage3(
+   message = `Invalid arguments for tool ${toolName}: ${getErrorMessage2(
      cause
    )}`
  }) {
@@ -3009,12 +2940,30 @@ function prepareToolsAndToolChoice({
    ([name11]) => activeTools.includes(name11)
  ) : Object.entries(tools);
  return {
-   tools: filteredTools.map(([name11, tool2]) => ({
-     type: "function",
-     name: name11,
-     description: tool2.description,
-     parameters: asSchema2(tool2.parameters).jsonSchema
-   })),
+   tools: filteredTools.map(([name11, tool2]) => {
+     const toolType = tool2.type;
+     switch (toolType) {
+       case void 0:
+       case "function":
+         return {
+           type: "function",
+           name: name11,
+           description: tool2.description,
+           parameters: asSchema2(tool2.parameters).jsonSchema
+         };
+       case "provider-defined":
+         return {
+           type: "provider-defined",
+           name: name11,
+           id: tool2.id,
+           args: tool2.args
+         };
+       default: {
+         const exhaustiveCheck = toolType;
+         throw new Error(`Unsupported tool type: ${exhaustiveCheck}`);
+       }
+     }
+   }),
    toolChoice: toolChoice == null ? { type: "auto" } : typeof toolChoice === "string" ? { type: toolChoice } : { type: "tool", toolName: toolChoice.toolName }
  };
  }
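
`prepareToolsAndToolChoice` previously assumed every tool was a function tool; it now branches on `tool2.type` and forwards provider-defined tools with their `id` and `args`. A hedged sketch of the two shapes (schemas and helpers are hypothetical; the id format is illustrative):

    const tools = {
      // type undefined or "function": unchanged behavior
      weather: {
        description: "Get the weather for a city",
        parameters: weatherSchema, // hypothetical zod schema
        execute: getWeather // hypothetical implementation
      },
      // "provider-defined": forwarded as { type, name, id, args }
      computer: {
        type: "provider-defined",
        id: "anthropic.computer_20241022", // illustrative provider.toolName id
        args: { displayWidthPx: 1280, displayHeightPx: 800 },
        parameters: computerSchema // hypothetical
      }
    };
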
@@ -3070,6 +3019,7 @@ function parseToolCall({
  // core/generate-text/to-response-messages.ts
  function toResponseMessages({
    text = "",
+   tools,
    toolCalls,
    toolResults
  }) {
@@ -3081,19 +3031,28 @@ function toResponseMessages({
    if (toolResults.length > 0) {
      responseMessages.push({
        role: "tool",
-       content: toolResults.map((result) => ({
-         type: "tool-result",
-         toolCallId: result.toolCallId,
-         toolName: result.toolName,
-         result: result.result
-       }))
+       content: toolResults.map((toolResult) => {
+         const tool2 = tools[toolResult.toolName];
+         return (tool2 == null ? void 0 : tool2.experimental_toToolResultContent) != null ? {
+           type: "tool-result",
+           toolCallId: toolResult.toolCallId,
+           toolName: toolResult.toolName,
+           result: tool2.experimental_toToolResultContent(toolResult.result),
+           content: tool2.experimental_toToolResultContent(toolResult.result)
+         } : {
+           type: "tool-result",
+           toolCallId: toolResult.toolCallId,
+           toolName: toolResult.toolName,
+           result: toolResult.result
+         };
+       })
      });
    }
    return responseMessages;
  }

  // core/generate-text/generate-text.ts
- var originalGenerateId3 = createIdGenerator3({ prefix: "aitxt-", size: 24 });
+ var originalGenerateId3 = createIdGenerator3({ prefix: "aitxt", size: 24 });
  async function generateText({
    model,
    tools,
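
`toResponseMessages` now receives the tool map, so a tool that defines the experimental `experimental_toToolResultContent` hook can render its raw result as multimodal tool-result content. A hedged sketch (`tool` and `z` are the usual `ai` / `zod` imports; the helper is hypothetical):

    // assumes: import { tool } from "ai"; import { z } from "zod";
    const screenshot = tool({
      parameters: z.object({}),
      execute: async () => takeScreenshot(), // hypothetical, returns a base64 PNG string
      experimental_toToolResultContent: (result) => [
        { type: "image", data: result, mimeType: "image/png" }
      ]
    });
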
@@ -3132,6 +3091,7 @@ async function generateText({
    headers,
    settings: { ...settings, maxRetries }
  });
+ const initialPrompt = standardizePrompt({ system, prompt, messages });
  const tracer = getTracer(telemetry);
  return recordSpan({
    name: "ai.generateText",
@@ -3152,9 +3112,8 @@ async function generateText({
    }),
    tracer,
    fn: async (span) => {
-     var _a11, _b, _c, _d, _e;
+     var _a11, _b, _c, _d, _e, _f, _g;
      const retry = retryWithExponentialBackoff({ maxRetries });
-     const currentPrompt = standardizePrompt({ system, prompt, messages });
      const mode = {
        type: "regular",
        ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
@@ -3175,11 +3134,17 @@ async function generateText({
      let stepType = "initial";
      do {
        if (stepCount === 1) {
-         currentPrompt.type = "messages";
+         initialPrompt.type = "messages";
        }
+       const promptFormat = stepCount === 0 ? initialPrompt.type : "messages";
        const promptMessages = await convertToLanguageModelPrompt({
-         prompt: currentPrompt,
-         modelSupportsImageUrls: model.supportsImageUrls
+         prompt: {
+           type: promptFormat,
+           system: initialPrompt.system,
+           messages: [...initialPrompt.messages, ...responseMessages]
+         },
+         modelSupportsImageUrls: model.supportsImageUrls,
+         modelSupportsUrl: model.supportsUrl
        });
        currentModelResponse = await retry(
          () => recordSpan({
@@ -3192,7 +3157,7 @@ async function generateText({
            telemetry
          }),
          ...baseTelemetryAttributes,
-         "ai.prompt.format": { input: () => currentPrompt.type },
+         "ai.prompt.format": { input: () => promptFormat },
          "ai.prompt.messages": {
            input: () => JSON.stringify(promptMessages)
          },
@@ -3210,11 +3175,11 @@ async function generateText({
          }),
          tracer,
          fn: async (span2) => {
-           var _a12, _b2, _c2, _d2, _e2, _f;
+           var _a12, _b2, _c2, _d2, _e2, _f2;
            const result = await model.doGenerate({
              mode,
              ...callSettings,
-             inputFormat: currentPrompt.type,
+             inputFormat: promptFormat,
              prompt: promptMessages,
              providerMetadata,
              abortSignal,
@@ -3223,7 +3188,7 @@ async function generateText({
            const responseData = {
              id: (_b2 = (_a12 = result.response) == null ? void 0 : _a12.id) != null ? _b2 : generateId3(),
              timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-             modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : model.modelId
+             modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : model.modelId
            };
            span2.setAttributes(
              selectTelemetryAttributes({
@@ -3293,7 +3258,27 @@ async function generateText({
      }
      const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace((_b = currentModelResponse.text) != null ? _b : "") : (_c = currentModelResponse.text) != null ? _c : "";
      text = nextStepType === "continue" || stepType === "continue" ? text + stepText : stepText;
-     const currentStep = {
+     if (stepType === "continue") {
+       const lastMessage = responseMessages[responseMessages.length - 1];
+       if (typeof lastMessage.content === "string") {
+         lastMessage.content = text;
+       } else {
+         lastMessage.content.push({
+           text: stepText,
+           type: "text"
+         });
+       }
+     } else {
+       responseMessages.push(
+         ...toResponseMessages({
+           text,
+           tools: tools != null ? tools : {},
+           toolCalls: currentToolCalls,
+           toolResults: currentToolResults
+         })
+       );
+     }
+     const currentStepResult = {
        stepType,
        text: stepText,
        toolCalls: currentToolCalls,
@@ -3302,36 +3287,18 @@ async function generateText({
        usage: currentUsage,
        warnings: currentModelResponse.warnings,
        logprobs: currentModelResponse.logprobs,
+       request: (_d = currentModelResponse.request) != null ? _d : {},
        response: {
          ...currentModelResponse.response,
-         headers: (_d = currentModelResponse.rawResponse) == null ? void 0 : _d.headers
+         headers: (_e = currentModelResponse.rawResponse) == null ? void 0 : _e.headers,
+         // deep clone msgs to avoid mutating past messages in multi-step:
+         messages: JSON.parse(JSON.stringify(responseMessages))
        },
        experimental_providerMetadata: currentModelResponse.providerMetadata,
        isContinued: nextStepType === "continue"
      };
-     steps.push(currentStep);
-     await (onStepFinish == null ? void 0 : onStepFinish(currentStep));
-     if (stepType === "continue") {
-       const lastMessage = currentPrompt.messages[currentPrompt.messages.length - 1];
-       if (typeof lastMessage.content === "string") {
-         lastMessage.content = text;
-       } else {
-         lastMessage.content.push({
-           text: stepText,
-           type: "text"
-         });
-       }
-       responseMessages[responseMessages.length - 1] = lastMessage;
-       currentPrompt.messages[currentPrompt.messages.length - 1] = lastMessage;
-     } else {
-       const newResponseMessages = toResponseMessages({
-         text,
-         toolCalls: currentToolCalls,
-         toolResults: currentToolResults
-       });
-       responseMessages.push(...newResponseMessages);
-       currentPrompt.messages.push(...newResponseMessages);
-     }
+     steps.push(currentStepResult);
+     await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
      stepType = nextStepType;
    } while (stepType !== "done");
    span.setAttributes(
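
With the rewrite above, `generateText` accumulates `responseMessages` as it steps and exposes a deep-cloned snapshot on each step's `response.messages`. That makes multi-step conversation threading straightforward; a hedged sketch (model and tools illustrative):

    const result = await generateText({
      model,
      tools,
      maxSteps: 3,
      messages: [{ role: "user", content: "What is the weather in Berlin?" }]
    });
    const followUp = await generateText({
      model,
      tools,
      messages: [
        { role: "user", content: "What is the weather in Berlin?" },
        ...result.response.messages, // assistant + tool messages from all steps
        { role: "user", content: "And in Paris?" }
      ]
    });
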
@@ -3365,9 +3332,11 @@ async function generateText({
      finishReason: currentModelResponse.finishReason,
      usage,
      warnings: currentModelResponse.warnings,
+     request: (_f = currentModelResponse.request) != null ? _f : {},
      response: {
        ...currentModelResponse.response,
-       headers: (_e = currentModelResponse.rawResponse) == null ? void 0 : _e.headers
+       headers: (_g = currentModelResponse.rawResponse) == null ? void 0 : _g.headers,
+       messages: responseMessages
      },
      logprobs: currentModelResponse.logprobs,
      responseMessages,
@@ -3445,6 +3414,7 @@ var DefaultGenerateTextResult = class {
    this.finishReason = options.finishReason;
    this.usage = options.usage;
    this.warnings = options.warnings;
+   this.request = options.request;
    this.response = options.response;
    this.responseMessages = options.responseMessages;
    this.roundtrips = options.steps;
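
`generateText` results now also carry a `request` field, defaulting to `{}` when the provider reports nothing about the outgoing call. A hedged sketch; whether `body` is populated depends on the provider:

    const { text, request } = await generateText({ model, prompt: "Hello" });
    console.log(request.body); // raw request body, if the provider exposes it
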
@@ -3802,7 +3772,7 @@ function runToolsTransformation({
  }

  // core/generate-text/stream-text.ts
- var originalGenerateId4 = createIdGenerator4({ prefix: "aitxt-", size: 24 });
+ var originalGenerateId4 = createIdGenerator4({ prefix: "aitxt", size: 24 });
  async function streamText({
    model,
    tools,
@@ -3844,6 +3814,7 @@ async function streamText({
    settings: { ...settings, maxRetries }
  });
  const tracer = getTracer(telemetry);
+ const initialPrompt = standardizePrompt({ system, prompt, messages });
  return recordSpan({
    name: "ai.streamText",
    attributes: selectTelemetryAttributes({
@@ -3863,14 +3834,20 @@ async function streamText({
    fn: async (rootSpan) => {
      const retry = retryWithExponentialBackoff({ maxRetries });
      const startStep = async ({
-       currentPrompt: currentPrompt2
+       responseMessages
      }) => {
+       const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
        const promptMessages = await convertToLanguageModelPrompt({
-         prompt: currentPrompt2,
-         modelSupportsImageUrls: model.supportsImageUrls
+         prompt: {
+           type: promptFormat,
+           system: initialPrompt.system,
+           messages: [...initialPrompt.messages, ...responseMessages]
+         },
+         modelSupportsImageUrls: model.supportsImageUrls,
+         modelSupportsUrl: model.supportsUrl
        });
        const {
-         result: { stream: stream2, warnings: warnings2, rawResponse: rawResponse2 },
+         result: { stream: stream2, warnings: warnings2, rawResponse: rawResponse2, request: request2 },
          doStreamSpan: doStreamSpan2,
          startTimestampMs: startTimestampMs2
        } = await retry(
@@ -3885,7 +3862,7 @@ async function streamText({
          }),
          ...baseTelemetryAttributes,
          "ai.prompt.format": {
-           input: () => currentPrompt2.type
+           input: () => promptFormat
          },
          "ai.prompt.messages": {
            input: () => JSON.stringify(promptMessages)
@@ -3918,7 +3895,7 @@ async function streamText({
            })
          },
          ...prepareCallSettings(settings),
-         inputFormat: currentPrompt2.type,
+         inputFormat: promptFormat,
          prompt: promptMessages,
          providerMetadata,
          abortSignal,
@@ -3938,22 +3915,23 @@ async function streamText({
            abortSignal
          }),
          warnings: warnings2,
+         request: request2 != null ? request2 : {},
          rawResponse: rawResponse2
        },
        doStreamSpan: doStreamSpan2,
        startTimestampMs: startTimestampMs2
      };
    };
-   const currentPrompt = standardizePrompt({ system, prompt, messages });
    const {
-     result: { stream, warnings, rawResponse },
+     result: { stream, warnings, rawResponse, request },
      doStreamSpan,
      startTimestampMs
-   } = await startStep({ currentPrompt });
+   } = await startStep({ responseMessages: [] });
    return new DefaultStreamTextResult({
      stream,
      warnings,
      rawResponse,
+     request,
      onChunk,
      onFinish,
      onStepFinish,
@@ -3964,11 +3942,11 @@ async function streamText({
      maxSteps,
      continueSteps,
      startStep,
-     currentPrompt,
      modelId: model.modelId,
      now: now2,
      currentDate,
-     generateId: generateId3
+     generateId: generateId3,
+     tools
    });
  }
  });
@@ -3978,6 +3956,7 @@ var DefaultStreamTextResult = class {
    stream,
    warnings,
    rawResponse,
+   request,
    onChunk,
    onFinish,
    onStepFinish,
@@ -3988,11 +3967,11 @@ var DefaultStreamTextResult = class {
    maxSteps,
    continueSteps,
    startStep,
-   currentPrompt,
    modelId,
    now: now2,
    currentDate,
-   generateId: generateId3
+   generateId: generateId3,
+   tools
  }) {
    this.warnings = warnings;
    this.rawResponse = rawResponse;
@@ -4013,6 +3992,8 @@ var DefaultStreamTextResult = class {
      promise: providerMetadataPromise
    } = createResolvablePromise();
    this.experimental_providerMetadata = providerMetadataPromise;
+   const { resolve: resolveRequest, promise: requestPromise } = createResolvablePromise();
+   this.request = requestPromise;
    const { resolve: resolveResponse, promise: responsePromise } = createResolvablePromise();
    this.response = responsePromise;
    const {
@@ -4033,14 +4014,15 @@ var DefaultStreamTextResult = class {
      startTimestamp,
      doStreamSpan: doStreamSpan2,
      currentStep,
-     currentPrompt: currentPrompt2,
+     responseMessages,
      usage = {
        promptTokens: 0,
        completionTokens: 0,
        totalTokens: 0
      },
      stepType,
-     previousStepText = ""
+     previousStepText = "",
+     stepRequest
    }) {
      const stepToolCalls = [];
      const stepToolResults = [];
@@ -4167,6 +4149,7 @@ var DefaultStreamTextResult = class {
      },
      // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
      async flush(controller) {
+       var _a11;
        const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
        let nextStepType = "done";
        if (currentStep + 1 < maxSteps) {
@@ -4232,10 +4215,32 @@ var DefaultStreamTextResult = class {
          usage: stepUsage,
          experimental_providerMetadata: stepProviderMetadata,
          logprobs: stepLogProbs,
-         response: stepResponse,
+         response: {
+           ...stepResponse
+         },
          isContinued: nextStepType === "continue"
        });
-       const stepResult = {
+       if (stepType === "continue") {
+         const lastMessage = responseMessages[responseMessages.length - 1];
+         if (typeof lastMessage.content === "string") {
+           lastMessage.content = stepText;
+         } else {
+           lastMessage.content.push({
+             text: stepText,
+             type: "text"
+           });
+         }
+       } else {
+         responseMessages.push(
+           ...toResponseMessages({
+             text: stepText,
+             tools: tools != null ? tools : {},
+             toolCalls: stepToolCalls,
+             toolResults: stepToolResults
+           })
+         );
+       }
+       const currentStepResult = {
          stepType,
         text: stepText,
         toolCalls: stepToolCalls,
@@ -4244,49 +4249,30 @@ var DefaultStreamTextResult = class {
          usage: stepUsage,
          warnings: self.warnings,
          logprobs: stepLogProbs,
-         response: stepResponse,
+         request: stepRequest,
          rawResponse: self.rawResponse,
+         response: {
+           ...stepResponse,
+           headers: (_a11 = self.rawResponse) == null ? void 0 : _a11.headers,
+           // deep clone msgs to avoid mutating past messages in multi-step:
+           messages: JSON.parse(JSON.stringify(responseMessages))
+         },
          experimental_providerMetadata: stepProviderMetadata,
          isContinued: nextStepType === "continue"
        };
-       stepResults.push(stepResult);
-       await (onStepFinish == null ? void 0 : onStepFinish(stepResult));
+       stepResults.push(currentStepResult);
+       await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
        const combinedUsage = {
          promptTokens: usage.promptTokens + stepUsage.promptTokens,
          completionTokens: usage.completionTokens + stepUsage.completionTokens,
          totalTokens: usage.totalTokens + stepUsage.totalTokens
        };
        if (nextStepType !== "done") {
-         if (stepType === "continue") {
-           const lastMessage = currentPrompt2.messages[currentPrompt2.messages.length - 1];
-           if (typeof lastMessage.content === "string") {
-             lastMessage.content = stepText;
-           } else {
-             lastMessage.content.push({
-               text: stepText,
-               type: "text"
-             });
-           }
-           currentPrompt2.messages[currentPrompt2.messages.length - 1] = lastMessage;
-         } else {
-           const newResponseMessages = toResponseMessages({
-             text: stepText,
-             toolCalls: stepToolCalls,
-             toolResults: stepToolResults
-           });
-           currentPrompt2.messages.push(...newResponseMessages);
-         }
          const {
            result,
            doStreamSpan: doStreamSpan3,
            startTimestampMs: startTimestamp2
-         } = await startStep({
-           currentPrompt: {
-             type: "messages",
-             system: currentPrompt2.system,
-             messages: currentPrompt2.messages
-           }
-         });
+         } = await startStep({ responseMessages });
          self.warnings = result.warnings;
          self.rawResponse = result.rawResponse;
          addStepStream({
@@ -4294,10 +4280,11 @@ var DefaultStreamTextResult = class {
            startTimestamp: startTimestamp2,
            doStreamSpan: doStreamSpan3,
            currentStep: currentStep + 1,
-           currentPrompt: currentPrompt2,
+           responseMessages,
            usage: combinedUsage,
            stepType: nextStepType,
-           previousStepText: fullStepText
+           previousStepText: fullStepText,
+           stepRequest: result.request
          });
          return;
        }
@@ -4308,7 +4295,9 @@ var DefaultStreamTextResult = class {
        usage: combinedUsage,
        experimental_providerMetadata: stepProviderMetadata,
        logprobs: stepLogProbs,
-       response: stepResponse
+       response: {
+         ...stepResponse
+       }
      });
      closeStitchableStream();
      rootSpan.setAttributes(
@@ -4331,34 +4320,13 @@ var DefaultStreamTextResult = class {
        }
      })
    );
-   const responseMessages = stepResults.reduce((responseMessages2, step) => {
-     if (step.stepType === "continue") {
-       const lastResponseMessage = responseMessages2.pop();
-       if (typeof lastResponseMessage.content === "string") {
-         lastResponseMessage.content += step.text;
-       } else {
-         lastResponseMessage.content.push({
-           text: step.text,
-           type: "text"
-         });
-       }
-       return [...responseMessages2, lastResponseMessage];
-     }
-     return [
-       ...responseMessages2,
-       ...toResponseMessages({
-         text: step.text,
-         toolCalls: step.toolCalls,
-         toolResults: step.toolResults
-       })
-     ];
-   }, []);
    resolveUsage(combinedUsage);
    resolveFinishReason(stepFinishReason);
    resolveText(fullStepText);
    resolveToolCalls(stepToolCalls);
    resolveProviderMetadata(stepProviderMetadata);
    resolveToolResults(stepToolResults);
+   resolveRequest(stepRequest);
    resolveResponse({
      ...stepResponse,
      headers: rawResponse == null ? void 0 : rawResponse.headers
@@ -4376,10 +4344,12 @@ var DefaultStreamTextResult = class {
      // optional as well. Therefore we need to cast the toolResults to any.
      // The type exposed to the users will be correctly inferred.
      toolResults: stepToolResults,
+     request: stepRequest,
      rawResponse,
      response: {
        ...stepResponse,
-       headers: rawResponse == null ? void 0 : rawResponse.headers
+       headers: rawResponse == null ? void 0 : rawResponse.headers,
+       messages: responseMessages
      },
      warnings,
      experimental_providerMetadata: stepProviderMetadata,
@@ -4401,9 +4371,10 @@ var DefaultStreamTextResult = class {
      startTimestamp: startTimestampMs,
      doStreamSpan,
      currentStep: 0,
-     currentPrompt,
+     responseMessages: [],
      usage: void 0,
-     stepType: "initial"
+     stepType: "initial",
+     stepRequest: request
    });
  }
  /**
@@ -4442,7 +4413,7 @@ var DefaultStreamTextResult = class {
  }
  toDataStreamInternal({
    callbacks = {},
-   getErrorMessage: getErrorMessage4 = () => "",
+   getErrorMessage: getErrorMessage3 = () => "",
    // mask error messages for safety by default
    sendUsage = true
  } = {}) {
@@ -4517,7 +4488,7 @@ var DefaultStreamTextResult = class {
    }
    case "error": {
      controller.enqueue(
-       formatStreamPart("error", getErrorMessage4(chunk.error))
+       formatStreamPart("error", getErrorMessage3(chunk.error))
      );
      break;
    }
@@ -4565,7 +4536,7 @@ var DefaultStreamTextResult = class {
      statusText: "statusText" in options ? options.statusText : void 0
    };
    const data = options == null ? void 0 : "data" in options ? options.data : void 0;
-   const getErrorMessage4 = options == null ? void 0 : "getErrorMessage" in options ? options.getErrorMessage : void 0;
+   const getErrorMessage3 = options == null ? void 0 : "getErrorMessage" in options ? options.getErrorMessage : void 0;
    const sendUsage = options == null ? void 0 : "sendUsage" in options ? options.sendUsage : void 0;
    writeToServerResponse({
      response,
@@ -4575,7 +4546,7 @@ var DefaultStreamTextResult = class {
      contentType: "text/plain; charset=utf-8",
      dataStreamVersion: "v1"
    }),
-   stream: this.toDataStream({ data, getErrorMessage: getErrorMessage4, sendUsage })
+   stream: this.toDataStream({ data, getErrorMessage: getErrorMessage3, sendUsage })
  });
  }
  pipeTextStreamToResponse(response, init) {
@@ -4607,10 +4578,10 @@ var DefaultStreamTextResult = class {
      statusText: "statusText" in options ? options.statusText : void 0
    };
    const data = options == null ? void 0 : "data" in options ? options.data : void 0;
-   const getErrorMessage4 = options == null ? void 0 : "getErrorMessage" in options ? options.getErrorMessage : void 0;
+   const getErrorMessage3 = options == null ? void 0 : "getErrorMessage" in options ? options.getErrorMessage : void 0;
    const sendUsage = options == null ? void 0 : "sendUsage" in options ? options.sendUsage : void 0;
    return new Response(
-     this.toDataStream({ data, getErrorMessage: getErrorMessage4, sendUsage }),
+     this.toDataStream({ data, getErrorMessage: getErrorMessage3, sendUsage }),
      {
        status: (_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200,
        statusText: init == null ? void 0 : init.statusText,
@@ -4652,6 +4623,7 @@ var experimental_wrapLanguageModel = ({
    modelId: modelId != null ? modelId : model.modelId,
    defaultObjectGenerationMode: model.defaultObjectGenerationMode,
    supportsImageUrls: model.supportsImageUrls,
+   supportsUrl: model.supportsUrl,
    supportsStructuredOutputs: model.supportsStructuredOutputs,
    async doGenerate(params) {
      const transformedParams = await doTransform({ params, type: "generate" });
@@ -4775,7 +4747,7 @@ function convertToCoreMessages(messages) {
    role: "assistant",
    content: [
      { type: "text", text: content },
-     ...toolInvocations.filter((invocation) => invocation.state !== "partial-call").map(({ toolCallId, toolName, args }) => ({
+     ...toolInvocations.map(({ toolCallId, toolName, args }) => ({
        type: "tool-call",
        toolCallId,
        toolName,
@@ -4783,19 +4755,25 @@ function convertToCoreMessages(messages) {
      }))
    ]
  });
- const toolResults = toolInvocations.filter((invocation) => invocation.state === "result").map(({ toolCallId, toolName, args, result }) => ({
-   type: "tool-result",
-   toolCallId,
-   toolName,
-   args,
-   result
- }));
- if (toolResults.length > 0) {
-   coreMessages.push({
-     role: "tool",
-     content: toolResults
-   });
- }
+ coreMessages.push({
+   role: "tool",
+   content: toolInvocations.map((ToolInvocation) => {
+     if (!("result" in ToolInvocation)) {
+       throw new MessageConversionError({
+         originalMessage: message,
+         message: "ToolInvocation must have a result: " + JSON.stringify(ToolInvocation)
+       });
+     }
+     const { toolCallId, toolName, args, result } = ToolInvocation;
+     return {
+       type: "tool-result",
+       toolCallId,
+       toolName,
+       args,
+       result
+     };
+   })
+ });
  break;
  }
  case "function":