ai 5.0.0-canary.5 → 5.0.0-canary.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/CHANGELOG.md +28 -0
  2. package/dist/index.d.mts +168 -448
  3. package/dist/index.d.ts +168 -448
  4. package/dist/index.js +289 -303
  5. package/dist/index.js.map +1 -1
  6. package/dist/index.mjs +205 -219
  7. package/dist/index.mjs.map +1 -1
  8. package/dist/internal/index.d.mts +259 -311
  9. package/dist/internal/index.d.ts +259 -311
  10. package/dist/internal/index.js +139 -155
  11. package/dist/internal/index.js.map +1 -1
  12. package/dist/internal/index.mjs +129 -145
  13. package/dist/internal/index.mjs.map +1 -1
  14. package/dist/mcp-stdio/index.js.map +1 -0
  15. package/dist/mcp-stdio/index.mjs.map +1 -0
  16. package/dist/test/index.js.map +1 -0
  17. package/dist/test/index.mjs.map +1 -0
  18. package/package.json +15 -18
  19. package/mcp-stdio/create-child-process.test.ts +0 -92
  20. package/mcp-stdio/create-child-process.ts +0 -21
  21. package/mcp-stdio/dist/index.js.map +0 -1
  22. package/mcp-stdio/dist/index.mjs.map +0 -1
  23. package/mcp-stdio/get-environment.test.ts +0 -13
  24. package/mcp-stdio/get-environment.ts +0 -43
  25. package/mcp-stdio/index.ts +0 -4
  26. package/mcp-stdio/mcp-stdio-transport.test.ts +0 -262
  27. package/mcp-stdio/mcp-stdio-transport.ts +0 -157
  28. package/test/dist/index.js.map +0 -1
  29. package/test/dist/index.mjs.map +0 -1
  30. /package/{mcp-stdio/dist → dist/mcp-stdio}/index.d.mts +0 -0
  31. /package/{mcp-stdio/dist → dist/mcp-stdio}/index.d.ts +0 -0
  32. /package/{mcp-stdio/dist → dist/mcp-stdio}/index.js +0 -0
  33. /package/{mcp-stdio/dist → dist/mcp-stdio}/index.mjs +0 -0
  34. /package/{test/dist → dist/test}/index.d.mts +0 -0
  35. /package/{test/dist → dist/test}/index.d.ts +0 -0
  36. /package/{test/dist → dist/test}/index.js +0 -0
  37. /package/{test/dist → dist/test}/index.mjs +0 -0
package/dist/index.js CHANGED
@@ -27,31 +27,31 @@ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__ge
27
27
  ));
28
28
  var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
29
29
 
30
- // streams/index.ts
31
- var streams_exports = {};
32
- __export(streams_exports, {
33
- AISDKError: () => import_provider20.AISDKError,
34
- APICallError: () => import_provider20.APICallError,
30
+ // index.ts
31
+ var ai_exports = {};
32
+ __export(ai_exports, {
33
+ AISDKError: () => import_provider21.AISDKError,
34
+ APICallError: () => import_provider21.APICallError,
35
35
  DownloadError: () => DownloadError,
36
- EmptyResponseBodyError: () => import_provider20.EmptyResponseBodyError,
36
+ EmptyResponseBodyError: () => import_provider21.EmptyResponseBodyError,
37
37
  InvalidArgumentError: () => InvalidArgumentError,
38
38
  InvalidDataContentError: () => InvalidDataContentError,
39
39
  InvalidMessageRoleError: () => InvalidMessageRoleError,
40
- InvalidPromptError: () => import_provider20.InvalidPromptError,
41
- InvalidResponseDataError: () => import_provider20.InvalidResponseDataError,
40
+ InvalidPromptError: () => import_provider21.InvalidPromptError,
41
+ InvalidResponseDataError: () => import_provider21.InvalidResponseDataError,
42
42
  InvalidStreamPartError: () => InvalidStreamPartError,
43
43
  InvalidToolArgumentsError: () => InvalidToolArgumentsError,
44
- JSONParseError: () => import_provider20.JSONParseError,
44
+ JSONParseError: () => import_provider21.JSONParseError,
45
45
  LangChainAdapter: () => langchain_adapter_exports,
46
46
  LlamaIndexAdapter: () => llamaindex_adapter_exports,
47
- LoadAPIKeyError: () => import_provider20.LoadAPIKeyError,
47
+ LoadAPIKeyError: () => import_provider21.LoadAPIKeyError,
48
48
  MCPClientError: () => MCPClientError,
49
49
  MessageConversionError: () => MessageConversionError,
50
- NoContentGeneratedError: () => import_provider20.NoContentGeneratedError,
50
+ NoContentGeneratedError: () => import_provider21.NoContentGeneratedError,
51
51
  NoImageGeneratedError: () => NoImageGeneratedError,
52
52
  NoObjectGeneratedError: () => NoObjectGeneratedError,
53
53
  NoOutputSpecifiedError: () => NoOutputSpecifiedError,
54
- NoSuchModelError: () => import_provider20.NoSuchModelError,
54
+ NoSuchModelError: () => import_provider21.NoSuchModelError,
55
55
  NoSuchProviderError: () => NoSuchProviderError,
56
56
  NoSuchToolError: () => NoSuchToolError,
57
57
  Output: () => output_exports,
@@ -59,8 +59,8 @@ __export(streams_exports, {
59
59
  StreamData: () => StreamData,
60
60
  ToolCallRepairError: () => ToolCallRepairError,
61
61
  ToolExecutionError: () => ToolExecutionError,
62
- TypeValidationError: () => import_provider20.TypeValidationError,
63
- UnsupportedFunctionalityError: () => import_provider20.UnsupportedFunctionalityError,
62
+ TypeValidationError: () => import_provider21.TypeValidationError,
63
+ UnsupportedFunctionalityError: () => import_provider21.UnsupportedFunctionalityError,
64
64
  appendClientMessage: () => appendClientMessage,
65
65
  appendResponseMessages: () => appendResponseMessages,
66
66
  asSchema: () => asSchema,
@@ -75,7 +75,7 @@ __export(streams_exports, {
75
75
  cosineSimilarity: () => cosineSimilarity,
76
76
  createDataStream: () => createDataStream,
77
77
  createDataStreamResponse: () => createDataStreamResponse,
78
- createIdGenerator: () => import_provider_utils21.createIdGenerator,
78
+ createIdGenerator: () => import_provider_utils20.createIdGenerator,
79
79
  createProviderRegistry: () => createProviderRegistry,
80
80
  customProvider: () => customProvider,
81
81
  defaultSettingsMiddleware: () => defaultSettingsMiddleware,
@@ -91,7 +91,7 @@ __export(streams_exports, {
91
91
  extractReasoningMiddleware: () => extractReasoningMiddleware,
92
92
  fillMessageParts: () => fillMessageParts,
93
93
  formatDataStreamPart: () => formatDataStreamPart,
94
- generateId: () => import_provider_utils21.generateId,
94
+ generateId: () => import_provider_utils20.generateId,
95
95
  generateObject: () => generateObject,
96
96
  generateText: () => generateText,
97
97
  getMessageParts: () => getMessageParts,
@@ -116,10 +116,10 @@ __export(streams_exports, {
116
116
  wrapLanguageModel: () => wrapLanguageModel,
117
117
  zodSchema: () => zodSchema
118
118
  });
119
- module.exports = __toCommonJS(streams_exports);
119
+ module.exports = __toCommonJS(ai_exports);
120
120
 
121
121
  // core/index.ts
122
- var import_provider_utils21 = require("@ai-sdk/provider-utils");
122
+ var import_provider_utils20 = require("@ai-sdk/provider-utils");
123
123
 
124
124
  // core/util/index.ts
125
125
  var import_provider_utils5 = require("@ai-sdk/provider-utils");
@@ -1536,7 +1536,10 @@ function isSchema(value) {
  return typeof value === "object" && value !== null && schemaSymbol in value && value[schemaSymbol] === true && "jsonSchema" in value && "validate" in value;
  }
  function asSchema(schema) {
- return isSchema(schema) ? schema : zodSchema(schema);
+ return schema == null ? jsonSchema({
+ properties: {},
+ additionalProperties: false
+ }) : isSchema(schema) ? schema : zodSchema(schema);
  }
 
  // core/util/should-resubmit-messages.ts
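The hunk above makes a missing schema legal: a nullish value passed to asSchema now falls back to an empty, closed JSON schema instead of being handed to zodSchema. A minimal sketch of the new behaviour, assuming the public asSchema export from "ai" (the published typings may still require a cast):

```ts
import { asSchema } from 'ai';
import { z } from 'zod';

// Unchanged path: zod schemas are still wrapped into the internal Schema type.
const citySchema = asSchema(z.object({ city: z.string() }));

// New fallback path in canary.7: a nullish schema resolves to an empty object schema.
const emptySchema = asSchema(undefined as any);
console.log(emptySchema.jsonSchema);
// -> { properties: {}, additionalProperties: false }
```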
@@ -2629,8 +2632,8 @@ var DefaultGenerateImageResult = class {
2629
2632
  };
2630
2633
 
2631
2634
  // core/generate-object/generate-object.ts
2632
- var import_provider12 = require("@ai-sdk/provider");
2633
- var import_provider_utils12 = require("@ai-sdk/provider-utils");
2635
+ var import_provider13 = require("@ai-sdk/provider");
2636
+ var import_provider_utils11 = require("@ai-sdk/provider-utils");
2634
2637
 
2635
2638
  // errors/no-object-generated-error.ts
2636
2639
  var import_provider5 = require("@ai-sdk/provider");
@@ -2660,9 +2663,6 @@ var NoObjectGeneratedError = class extends import_provider5.AISDKError {
2660
2663
  };
2661
2664
  _a4 = symbol4;
2662
2665
 
2663
- // core/prompt/convert-to-language-model-prompt.ts
2664
- var import_provider_utils9 = require("@ai-sdk/provider-utils");
2665
-
2666
2666
  // util/download-error.ts
2667
2667
  var import_provider6 = require("@ai-sdk/provider");
2668
2668
  var name5 = "AI_DownloadError";
@@ -2715,7 +2715,9 @@ async function download({ url }) {
2715
2715
  }
2716
2716
 
2717
2717
  // core/prompt/data-content.ts
2718
+ var import_provider8 = require("@ai-sdk/provider");
2718
2719
  var import_provider_utils8 = require("@ai-sdk/provider-utils");
2720
+ var import_zod = require("zod");
2719
2721
 
2720
2722
  // core/prompt/invalid-data-content-error.ts
2721
2723
  var import_provider7 = require("@ai-sdk/provider");
@@ -2739,8 +2741,23 @@ var InvalidDataContentError = class extends import_provider7.AISDKError {
  };
  _a6 = symbol6;
 
+ // core/prompt/split-data-url.ts
+ function splitDataUrl(dataUrl) {
+ try {
+ const [header, base64Content] = dataUrl.split(",");
+ return {
+ mediaType: header.split(";")[0].split(":")[1],
+ base64Content
+ };
+ } catch (error) {
+ return {
+ mediaType: void 0,
+ base64Content: void 0
+ };
+ }
+ }
+
  // core/prompt/data-content.ts
- var import_zod = require("zod");
  var dataContentSchema = import_zod.z.union([
  import_zod.z.string(),
  import_zod.z.instanceof(Uint8Array),
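splitDataUrl, moved ahead of data-content.ts here, is a best-effort parser that splits a data: URL into its media type and base64 payload, returning undefined for both on malformed input. A self-contained re-implementation for illustration only (the real helper is internal to the bundle and not exported):

```ts
// Hypothetical standalone copy of the helper shown in the hunk above.
function splitDataUrl(dataUrl: string): {
  mediaType: string | undefined;
  base64Content: string | undefined;
} {
  try {
    const [header, base64Content] = dataUrl.split(',');
    return { mediaType: header.split(';')[0].split(':')[1], base64Content };
  } catch {
    return { mediaType: undefined, base64Content: undefined };
  }
}

console.log(splitDataUrl('data:image/png;base64,iVBORw0KGgo='));
// -> { mediaType: 'image/png', base64Content: 'iVBORw0KGgo=' }
```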
@@ -2754,6 +2771,33 @@ var dataContentSchema = import_zod.z.union([
  { message: "Must be a Buffer" }
  )
  ]);
+ function convertToLanguageModelV2DataContent(content) {
+ if (content instanceof Uint8Array) {
+ return { data: content, mediaType: void 0 };
+ }
+ if (content instanceof ArrayBuffer) {
+ return { data: new Uint8Array(content), mediaType: void 0 };
+ }
+ if (typeof content === "string") {
+ try {
+ content = new URL(content);
+ } catch (error) {
+ }
+ }
+ if (content instanceof URL && content.protocol === "data:") {
+ const { mediaType: dataUrlMediaType, base64Content } = splitDataUrl(
+ content.toString()
+ );
+ if (dataUrlMediaType == null || base64Content == null) {
+ throw new import_provider8.AISDKError({
+ name: "InvalidDataContentError",
+ message: `Invalid data URL format in content ${content.toString()}`
+ });
+ }
+ return { data: base64Content, mediaType: dataUrlMediaType };
+ }
+ return { data: content, mediaType: void 0 };
+ }
  function convertDataContentToBase64String(content) {
  if (typeof content === "string") {
  return content;
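convertToLanguageModelV2DataContent is the new single entry point that normalizes user-supplied file and image content into a { data, mediaType } pair before it reaches the provider. A behavioural sketch, with the expected results read off the code above (the function is internal, so it is only declared here, not imported):

```ts
// Declared, not imported: the helper is not part of the public "ai" API surface.
declare function convertToLanguageModelV2DataContent(
  content: string | Uint8Array | ArrayBuffer | URL,
): { data: string | Uint8Array | URL; mediaType: string | undefined };

convertToLanguageModelV2DataContent(new Uint8Array([1, 2, 3]));
// -> { data: Uint8Array [1, 2, 3], mediaType: undefined }

convertToLanguageModelV2DataContent('https://example.com/cat.png');
// -> { data: URL('https://example.com/cat.png'), mediaType: undefined } (string parsed as URL)

convertToLanguageModelV2DataContent('data:image/png;base64,iVBORw0KGgo=');
// -> { data: 'iVBORw0KGgo=', mediaType: 'image/png' } (data URL split by splitDataUrl)

convertToLanguageModelV2DataContent('data:image/png'); // malformed data URL
// -> throws AISDKError: "Invalid data URL format in content data:image/png"
```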
@@ -2792,12 +2836,12 @@ function convertUint8ArrayToText(uint8Array) {
2792
2836
  }
2793
2837
 
2794
2838
  // core/prompt/invalid-message-role-error.ts
2795
- var import_provider8 = require("@ai-sdk/provider");
2839
+ var import_provider9 = require("@ai-sdk/provider");
2796
2840
  var name7 = "AI_InvalidMessageRoleError";
2797
2841
  var marker7 = `vercel.ai.error.${name7}`;
2798
2842
  var symbol7 = Symbol.for(marker7);
2799
2843
  var _a7;
2800
- var InvalidMessageRoleError = class extends import_provider8.AISDKError {
2844
+ var InvalidMessageRoleError = class extends import_provider9.AISDKError {
2801
2845
  constructor({
2802
2846
  role,
2803
2847
  message = `Invalid message role: '${role}'. Must be one of: "system", "user", "assistant", "tool".`
@@ -2807,27 +2851,11 @@ var InvalidMessageRoleError = class extends import_provider8.AISDKError {
2807
2851
  this.role = role;
2808
2852
  }
2809
2853
  static isInstance(error) {
2810
- return import_provider8.AISDKError.hasMarker(error, marker7);
2854
+ return import_provider9.AISDKError.hasMarker(error, marker7);
2811
2855
  }
2812
2856
  };
2813
2857
  _a7 = symbol7;
2814
2858
 
2815
- // core/prompt/split-data-url.ts
2816
- function splitDataUrl(dataUrl) {
2817
- try {
2818
- const [header, base64Content] = dataUrl.split(",");
2819
- return {
2820
- mediaType: header.split(";")[0].split(":")[1],
2821
- base64Content
2822
- };
2823
- } catch (error) {
2824
- return {
2825
- mediaType: void 0,
2826
- base64Content: void 0
2827
- };
2828
- }
2829
- }
2830
-
2831
2859
  // core/prompt/convert-to-language-model-prompt.ts
2832
2860
  async function convertToLanguageModelPrompt({
2833
2861
  prompt,
@@ -2849,14 +2877,13 @@ async function convertToLanguageModelPrompt({
2849
2877
  ];
2850
2878
  }
2851
2879
  function convertToLanguageModelMessage(message, downloadedAssets) {
2852
- var _a17, _b, _c, _d, _e, _f;
2853
2880
  const role = message.role;
2854
2881
  switch (role) {
2855
2882
  case "system": {
2856
2883
  return {
2857
2884
  role: "system",
2858
2885
  content: message.content,
2859
- providerOptions: (_a17 = message.providerOptions) != null ? _a17 : message.experimental_providerMetadata
2886
+ providerOptions: message.providerOptions
2860
2887
  };
2861
2888
  }
2862
2889
  case "user": {
@@ -2864,13 +2891,13 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
2864
2891
  return {
2865
2892
  role: "user",
2866
2893
  content: [{ type: "text", text: message.content }],
2867
- providerOptions: (_b = message.providerOptions) != null ? _b : message.experimental_providerMetadata
2894
+ providerOptions: message.providerOptions
2868
2895
  };
2869
2896
  }
2870
2897
  return {
2871
2898
  role: "user",
2872
2899
  content: message.content.map((part) => convertPartToLanguageModelPart(part, downloadedAssets)).filter((part) => part.type !== "text" || part.text !== ""),
2873
- providerOptions: (_c = message.providerOptions) != null ? _c : message.experimental_providerMetadata
2900
+ providerOptions: message.providerOptions
2874
2901
  };
2875
2902
  }
2876
2903
  case "assistant": {
@@ -2878,7 +2905,7 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
2878
2905
  return {
2879
2906
  role: "assistant",
2880
2907
  content: [{ type: "text", text: message.content }],
2881
- providerOptions: (_d = message.providerOptions) != null ? _d : message.experimental_providerMetadata
2908
+ providerOptions: message.providerOptions
2882
2909
  };
2883
2910
  }
2884
2911
  return {
@@ -2887,15 +2914,18 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
2887
2914
  // remove empty text parts:
2888
2915
  (part) => part.type !== "text" || part.text !== ""
2889
2916
  ).map((part) => {
2890
- var _a18, _b2;
2891
- const providerOptions = (_a18 = part.providerOptions) != null ? _a18 : part.experimental_providerMetadata;
2917
+ var _a17;
2918
+ const providerOptions = part.providerOptions;
2892
2919
  switch (part.type) {
2893
2920
  case "file": {
2921
+ const { data, mediaType } = convertToLanguageModelV2DataContent(
2922
+ part.data
2923
+ );
2894
2924
  return {
2895
2925
  type: "file",
2896
- data: part.data instanceof URL ? part.data : convertDataContentToBase64String(part.data),
2926
+ data,
2897
2927
  filename: part.filename,
2898
- mediaType: (_b2 = part.mediaType) != null ? _b2 : part.mimeType,
2928
+ mediaType: (_a17 = mediaType != null ? mediaType : part.mediaType) != null ? _a17 : part.mimeType,
2899
2929
  providerOptions
2900
2930
  };
2901
2931
  }
@@ -2932,25 +2962,22 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
2932
2962
  }
2933
2963
  }
2934
2964
  }),
2935
- providerOptions: (_e = message.providerOptions) != null ? _e : message.experimental_providerMetadata
2965
+ providerOptions: message.providerOptions
2936
2966
  };
2937
2967
  }
2938
2968
  case "tool": {
2939
2969
  return {
2940
2970
  role: "tool",
2941
- content: message.content.map((part) => {
2942
- var _a18;
2943
- return {
2944
- type: "tool-result",
2945
- toolCallId: part.toolCallId,
2946
- toolName: part.toolName,
2947
- result: part.result,
2948
- content: part.experimental_content,
2949
- isError: part.isError,
2950
- providerOptions: (_a18 = part.providerOptions) != null ? _a18 : part.experimental_providerMetadata
2951
- };
2952
- }),
2953
- providerOptions: (_f = message.providerOptions) != null ? _f : message.experimental_providerMetadata
2971
+ content: message.content.map((part) => ({
2972
+ type: "tool-result",
2973
+ toolCallId: part.toolCallId,
2974
+ toolName: part.toolName,
2975
+ result: part.result,
2976
+ content: part.experimental_content,
2977
+ isError: part.isError,
2978
+ providerOptions: part.providerOptions
2979
+ })),
2980
+ providerOptions: message.providerOptions
2954
2981
  };
2955
2982
  }
2956
2983
  default: {
@@ -2983,71 +3010,48 @@ async function downloadAssets(messages, downloadImplementation, modelSupportsIma
2983
3010
  );
2984
3011
  }
2985
3012
  function convertPartToLanguageModelPart(part, downloadedAssets) {
2986
- var _a17, _b, _c, _d, _e;
3013
+ var _a17, _b, _c;
2987
3014
  if (part.type === "text") {
2988
3015
  return {
2989
3016
  type: "text",
2990
3017
  text: part.text,
2991
- providerOptions: (_a17 = part.providerOptions) != null ? _a17 : part.experimental_providerMetadata
3018
+ providerOptions: part.providerOptions
2992
3019
  };
2993
3020
  }
2994
- let mediaType = (_b = part.mediaType) != null ? _b : part.mimeType;
2995
- let data;
2996
- let content;
2997
- let normalizedData;
3021
+ let originalData;
2998
3022
  const type = part.type;
2999
3023
  switch (type) {
3000
3024
  case "image":
3001
- data = part.image;
3025
+ originalData = part.image;
3002
3026
  break;
3003
3027
  case "file":
3004
- data = part.data;
3028
+ originalData = part.data;
3005
3029
  break;
3006
3030
  default:
3007
3031
  throw new Error(`Unsupported part type: ${type}`);
3008
3032
  }
3009
- try {
3010
- content = typeof data === "string" ? new URL(data) : data;
3011
- } catch (error) {
3012
- content = data;
3013
- }
3014
- if (content instanceof URL) {
3015
- if (content.protocol === "data:") {
3016
- const { mediaType: dataUrlMediaType, base64Content } = splitDataUrl(
3017
- content.toString()
3018
- );
3019
- if (dataUrlMediaType == null || base64Content == null) {
3020
- throw new Error(`Invalid data URL format in part ${type}`);
3021
- }
3022
- mediaType = dataUrlMediaType;
3023
- normalizedData = convertDataContentToUint8Array(base64Content);
3024
- } else {
3025
- const downloadedFile = downloadedAssets[content.toString()];
3026
- if (downloadedFile) {
3027
- normalizedData = downloadedFile.data;
3028
- mediaType != null ? mediaType : mediaType = downloadedFile.mediaType;
3029
- } else {
3030
- normalizedData = content;
3031
- }
3033
+ const { data: convertedData, mediaType: convertedMediaType } = convertToLanguageModelV2DataContent(originalData);
3034
+ let mediaType = (_a17 = convertedMediaType != null ? convertedMediaType : part.mediaType) != null ? _a17 : part.mimeType;
3035
+ let data = convertedData;
3036
+ if (data instanceof URL) {
3037
+ const downloadedFile = downloadedAssets[data.toString()];
3038
+ if (downloadedFile) {
3039
+ data = downloadedFile.data;
3040
+ mediaType = (_b = downloadedFile.mediaType) != null ? _b : mediaType;
3032
3041
  }
3033
- } else {
3034
- normalizedData = convertDataContentToUint8Array(content);
3035
3042
  }
3036
3043
  switch (type) {
3037
3044
  case "image": {
3038
- if (normalizedData instanceof Uint8Array) {
3039
- mediaType = (_c = detectMediaType({
3040
- data: normalizedData,
3041
- signatures: imageMediaTypeSignatures
3042
- })) != null ? _c : mediaType;
3045
+ if (data instanceof Uint8Array || typeof data === "string") {
3046
+ mediaType = (_c = detectMediaType({ data, signatures: imageMediaTypeSignatures })) != null ? _c : mediaType;
3043
3047
  }
3044
3048
  return {
3045
3049
  type: "file",
3046
3050
  mediaType: mediaType != null ? mediaType : "image/*",
3047
3051
  // any image
3048
3052
  filename: void 0,
3049
- data: normalizedData instanceof Uint8Array ? (0, import_provider_utils9.convertUint8ArrayToBase64)(normalizedData) : normalizedData,
3050
- providerOptions: (_d = part.providerOptions) != null ? _d : part.experimental_providerMetadata
3053
+ data,
3054
+ providerOptions: part.providerOptions
3051
3055
  };
3052
3056
  }
3053
3057
  case "file": {
@@ -3058,8 +3062,8 @@ function convertPartToLanguageModelPart(part, downloadedAssets) {
3058
3062
  type: "file",
3059
3063
  mediaType,
3060
3064
  filename: part.filename,
3061
- data: normalizedData instanceof Uint8Array ? convertDataContentToBase64String(normalizedData) : normalizedData,
3062
- providerOptions: (_e = part.providerOptions) != null ? _e : part.experimental_providerMetadata
3065
+ data,
3066
+ providerOptions: part.providerOptions
3063
3067
  };
3064
3068
  }
3065
3069
  }
@@ -3067,7 +3071,7 @@ function convertPartToLanguageModelPart(part, downloadedAssets) {
 
  // core/prompt/prepare-call-settings.ts
  function prepareCallSettings({
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -3076,19 +3080,19 @@ function prepareCallSettings({
  stopSequences,
  seed
  }) {
- if (maxTokens != null) {
- if (!Number.isInteger(maxTokens)) {
+ if (maxOutputTokens != null) {
+ if (!Number.isInteger(maxOutputTokens)) {
  throw new InvalidArgumentError({
- parameter: "maxTokens",
- value: maxTokens,
- message: "maxTokens must be an integer"
+ parameter: "maxOutputTokens",
+ value: maxOutputTokens,
+ message: "maxOutputTokens must be an integer"
  });
  }
- if (maxTokens < 1) {
+ if (maxOutputTokens < 1) {
  throw new InvalidArgumentError({
- parameter: "maxTokens",
- value: maxTokens,
- message: "maxTokens must be >= 1"
+ parameter: "maxOutputTokens",
+ value: maxOutputTokens,
+ message: "maxOutputTokens must be >= 1"
  });
  }
  }
@@ -3147,7 +3151,7 @@ function prepareCallSettings({
  }
  }
  return {
- maxTokens,
+ maxOutputTokens,
  // TODO v5 remove default 0 for temperature
  temperature: temperature != null ? temperature : 0,
  topP,
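The hunks above rename the call setting maxTokens to maxOutputTokens throughout prepareCallSettings, including the validation messages and the returned settings object. At the call-site level this corresponds to the v5 option rename, roughly as follows (a sketch, assuming the option is exposed under the same name on generateText):

```ts
import { generateText } from 'ai';

// Stand-in for any LanguageModel instance; assumed to be defined elsewhere.
declare const yourModel: Parameters<typeof generateText>[0]['model'];

const result = await generateText({
  model: yourModel,
  prompt: 'Summarize the release notes in one sentence.',
  // 5.0.0-canary.x: was `maxTokens` in earlier versions.
  maxOutputTokens: 256, // must be an integer >= 1, enforced by prepareCallSettings
});
```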
@@ -3160,8 +3164,8 @@ function prepareCallSettings({
3160
3164
  }
3161
3165
 
3162
3166
  // core/prompt/standardize-prompt.ts
3163
- var import_provider10 = require("@ai-sdk/provider");
3164
- var import_provider_utils10 = require("@ai-sdk/provider-utils");
3167
+ var import_provider11 = require("@ai-sdk/provider");
3168
+ var import_provider_utils9 = require("@ai-sdk/provider-utils");
3165
3169
  var import_zod7 = require("zod");
3166
3170
 
3167
3171
  // core/prompt/attachments-to-parts.ts
@@ -3242,12 +3246,12 @@ function attachmentsToParts(attachments) {
3242
3246
  }
3243
3247
 
3244
3248
  // core/prompt/message-conversion-error.ts
3245
- var import_provider9 = require("@ai-sdk/provider");
3249
+ var import_provider10 = require("@ai-sdk/provider");
3246
3250
  var name8 = "AI_MessageConversionError";
3247
3251
  var marker8 = `vercel.ai.error.${name8}`;
3248
3252
  var symbol8 = Symbol.for(marker8);
3249
3253
  var _a8;
3250
- var MessageConversionError = class extends import_provider9.AISDKError {
3254
+ var MessageConversionError = class extends import_provider10.AISDKError {
3251
3255
  constructor({
3252
3256
  originalMessage,
3253
3257
  message
@@ -3257,7 +3261,7 @@ var MessageConversionError = class extends import_provider9.AISDKError {
3257
3261
  this.originalMessage = originalMessage;
3258
3262
  }
3259
3263
  static isInstance(error) {
3260
- return import_provider9.AISDKError.hasMarker(error, marker8);
3264
+ return import_provider10.AISDKError.hasMarker(error, marker8);
3261
3265
  }
3262
3266
  };
3263
3267
  _a8 = symbol8;
@@ -3531,7 +3535,7 @@ function detectSingleMessageCharacteristics(message) {
3531
3535
  "experimental_attachments" in message)) {
3532
3536
  return "has-ui-specific-parts";
3533
3537
  } else if (typeof message === "object" && message !== null && "content" in message && (Array.isArray(message.content) || // Core messages can have array content
3534
- "experimental_providerMetadata" in message || "providerOptions" in message)) {
3538
+ "providerOptions" in message)) {
3535
3539
  return "has-core-specific-parts";
3536
3540
  } else if (typeof message === "object" && message !== null && "role" in message && "content" in message && typeof message.content === "string" && ["system", "user", "assistant", "tool"].includes(message.role)) {
3537
3541
  return "message";
@@ -3585,16 +3589,14 @@ var toolResultContentSchema = import_zod4.z.array(
3585
3589
  var textPartSchema = import_zod5.z.object({
3586
3590
  type: import_zod5.z.literal("text"),
3587
3591
  text: import_zod5.z.string(),
3588
- providerOptions: providerMetadataSchema.optional(),
3589
- experimental_providerMetadata: providerMetadataSchema.optional()
3592
+ providerOptions: providerMetadataSchema.optional()
3590
3593
  });
3591
3594
  var imagePartSchema = import_zod5.z.object({
3592
3595
  type: import_zod5.z.literal("image"),
3593
3596
  image: import_zod5.z.union([dataContentSchema, import_zod5.z.instanceof(URL)]),
3594
3597
  mediaType: import_zod5.z.string().optional(),
3595
3598
  mimeType: import_zod5.z.string().optional(),
3596
- providerOptions: providerMetadataSchema.optional(),
3597
- experimental_providerMetadata: providerMetadataSchema.optional()
3599
+ providerOptions: providerMetadataSchema.optional()
3598
3600
  });
3599
3601
  var filePartSchema = import_zod5.z.object({
3600
3602
  type: import_zod5.z.literal("file"),
@@ -3602,28 +3604,24 @@ var filePartSchema = import_zod5.z.object({
3602
3604
  filename: import_zod5.z.string().optional(),
3603
3605
  mediaType: import_zod5.z.string(),
3604
3606
  mimeType: import_zod5.z.string().optional(),
3605
- providerOptions: providerMetadataSchema.optional(),
3606
- experimental_providerMetadata: providerMetadataSchema.optional()
3607
+ providerOptions: providerMetadataSchema.optional()
3607
3608
  });
3608
3609
  var reasoningPartSchema = import_zod5.z.object({
3609
3610
  type: import_zod5.z.literal("reasoning"),
3610
3611
  text: import_zod5.z.string(),
3611
- providerOptions: providerMetadataSchema.optional(),
3612
- experimental_providerMetadata: providerMetadataSchema.optional()
3612
+ providerOptions: providerMetadataSchema.optional()
3613
3613
  });
3614
3614
  var redactedReasoningPartSchema = import_zod5.z.object({
3615
3615
  type: import_zod5.z.literal("redacted-reasoning"),
3616
3616
  data: import_zod5.z.string(),
3617
- providerOptions: providerMetadataSchema.optional(),
3618
- experimental_providerMetadata: providerMetadataSchema.optional()
3617
+ providerOptions: providerMetadataSchema.optional()
3619
3618
  });
3620
3619
  var toolCallPartSchema = import_zod5.z.object({
3621
3620
  type: import_zod5.z.literal("tool-call"),
3622
3621
  toolCallId: import_zod5.z.string(),
3623
3622
  toolName: import_zod5.z.string(),
3624
3623
  args: import_zod5.z.unknown(),
3625
- providerOptions: providerMetadataSchema.optional(),
3626
- experimental_providerMetadata: providerMetadataSchema.optional()
3624
+ providerOptions: providerMetadataSchema.optional()
3627
3625
  });
3628
3626
  var toolResultPartSchema = import_zod5.z.object({
3629
3627
  type: import_zod5.z.literal("tool-result"),
@@ -3632,16 +3630,14 @@ var toolResultPartSchema = import_zod5.z.object({
3632
3630
  result: import_zod5.z.unknown(),
3633
3631
  content: toolResultContentSchema.optional(),
3634
3632
  isError: import_zod5.z.boolean().optional(),
3635
- providerOptions: providerMetadataSchema.optional(),
3636
- experimental_providerMetadata: providerMetadataSchema.optional()
3633
+ providerOptions: providerMetadataSchema.optional()
3637
3634
  });
3638
3635
 
3639
3636
  // core/prompt/message.ts
3640
3637
  var coreSystemMessageSchema = import_zod6.z.object({
3641
3638
  role: import_zod6.z.literal("system"),
3642
3639
  content: import_zod6.z.string(),
3643
- providerOptions: providerMetadataSchema.optional(),
3644
- experimental_providerMetadata: providerMetadataSchema.optional()
3640
+ providerOptions: providerMetadataSchema.optional()
3645
3641
  });
3646
3642
  var coreUserMessageSchema = import_zod6.z.object({
3647
3643
  role: import_zod6.z.literal("user"),
@@ -3649,8 +3645,7 @@ var coreUserMessageSchema = import_zod6.z.object({
3649
3645
  import_zod6.z.string(),
3650
3646
  import_zod6.z.array(import_zod6.z.union([textPartSchema, imagePartSchema, filePartSchema]))
3651
3647
  ]),
3652
- providerOptions: providerMetadataSchema.optional(),
3653
- experimental_providerMetadata: providerMetadataSchema.optional()
3648
+ providerOptions: providerMetadataSchema.optional()
3654
3649
  });
3655
3650
  var coreAssistantMessageSchema = import_zod6.z.object({
3656
3651
  role: import_zod6.z.literal("assistant"),
@@ -3666,14 +3661,12 @@ var coreAssistantMessageSchema = import_zod6.z.object({
3666
3661
  ])
3667
3662
  )
3668
3663
  ]),
3669
- providerOptions: providerMetadataSchema.optional(),
3670
- experimental_providerMetadata: providerMetadataSchema.optional()
3664
+ providerOptions: providerMetadataSchema.optional()
3671
3665
  });
3672
3666
  var coreToolMessageSchema = import_zod6.z.object({
3673
3667
  role: import_zod6.z.literal("tool"),
3674
3668
  content: import_zod6.z.array(toolResultPartSchema),
3675
- providerOptions: providerMetadataSchema.optional(),
3676
- experimental_providerMetadata: providerMetadataSchema.optional()
3669
+ providerOptions: providerMetadataSchema.optional()
3677
3670
  });
3678
3671
  var coreMessageSchema = import_zod6.z.union([
3679
3672
  coreSystemMessageSchema,
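Throughout these schema hunks the deprecated experimental_providerMetadata field is dropped; providerOptions is now the only accepted key on core messages and message parts. A message that validates against the updated coreMessageSchema would look roughly like this (the openai options object is a hypothetical example, not taken from the diff):

```ts
import type { CoreMessage } from 'ai';

// Sketch: provider-specific options now live exclusively under `providerOptions`;
// `experimental_providerMetadata` is no longer part of the message schema.
const message: CoreMessage = {
  role: 'user',
  content: [
    {
      type: 'text',
      text: 'Describe this image.',
      providerOptions: { openai: { detail: 'low' } }, // hypothetical provider key/values
    },
  ],
  providerOptions: {},
};
```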
@@ -3688,26 +3681,26 @@ function standardizePrompt({
3688
3681
  tools
3689
3682
  }) {
3690
3683
  if (prompt.prompt == null && prompt.messages == null) {
3691
- throw new import_provider10.InvalidPromptError({
3684
+ throw new import_provider11.InvalidPromptError({
3692
3685
  prompt,
3693
3686
  message: "prompt or messages must be defined"
3694
3687
  });
3695
3688
  }
3696
3689
  if (prompt.prompt != null && prompt.messages != null) {
3697
- throw new import_provider10.InvalidPromptError({
3690
+ throw new import_provider11.InvalidPromptError({
3698
3691
  prompt,
3699
3692
  message: "prompt and messages cannot be defined at the same time"
3700
3693
  });
3701
3694
  }
3702
3695
  if (prompt.system != null && typeof prompt.system !== "string") {
3703
- throw new import_provider10.InvalidPromptError({
3696
+ throw new import_provider11.InvalidPromptError({
3704
3697
  prompt,
3705
3698
  message: "system must be a string"
3706
3699
  });
3707
3700
  }
3708
3701
  if (prompt.prompt != null) {
3709
3702
  if (typeof prompt.prompt !== "string") {
3710
- throw new import_provider10.InvalidPromptError({
3703
+ throw new import_provider11.InvalidPromptError({
3711
3704
  prompt,
3712
3705
  message: "prompt must be a string"
3713
3706
  });
@@ -3726,7 +3719,7 @@ function standardizePrompt({
3726
3719
  if (prompt.messages != null) {
3727
3720
  const promptType = detectPromptType(prompt.messages);
3728
3721
  if (promptType === "other") {
3729
- throw new import_provider10.InvalidPromptError({
3722
+ throw new import_provider11.InvalidPromptError({
3730
3723
  prompt,
3731
3724
  message: "messages must be an array of CoreMessage or UIMessage"
3732
3725
  });
@@ -3735,17 +3728,17 @@ function standardizePrompt({
3735
3728
  tools
3736
3729
  }) : prompt.messages;
3737
3730
  if (messages.length === 0) {
3738
- throw new import_provider10.InvalidPromptError({
3731
+ throw new import_provider11.InvalidPromptError({
3739
3732
  prompt,
3740
3733
  message: "messages must not be empty"
3741
3734
  });
3742
3735
  }
3743
- const validationResult = (0, import_provider_utils10.safeValidateTypes)({
3736
+ const validationResult = (0, import_provider_utils9.safeValidateTypes)({
3744
3737
  value: messages,
3745
3738
  schema: import_zod7.z.array(coreMessageSchema)
3746
3739
  });
3747
3740
  if (!validationResult.success) {
3748
- throw new import_provider10.InvalidPromptError({
3741
+ throw new import_provider11.InvalidPromptError({
3749
3742
  prompt,
3750
3743
  message: "messages must be an array of CoreMessage or UIMessage",
3751
3744
  cause: validationResult.error
@@ -3762,13 +3755,13 @@ function standardizePrompt({
 
  // core/types/usage.ts
  function calculateLanguageModelUsage2({
- promptTokens,
- completionTokens
+ inputTokens,
+ outputTokens
  }) {
  return {
- promptTokens,
- completionTokens,
- totalTokens: promptTokens + completionTokens
+ promptTokens: inputTokens != null ? inputTokens : NaN,
+ completionTokens: outputTokens != null ? outputTokens : NaN,
+ totalTokens: (inputTokens != null ? inputTokens : 0) + (outputTokens != null ? outputTokens : 0)
  };
  }
  function addLanguageModelUsage(usage1, usage2) {
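calculateLanguageModelUsage2 now maps the V2 usage fields (inputTokens/outputTokens, both possibly undefined) onto the older promptTokens/completionTokens shape, substituting NaN for missing counts and 0 when summing. In effect:

```ts
// Sketch of the mapping performed above (not a public export).
type LanguageModelV2Usage = { inputTokens?: number; outputTokens?: number };

function calculateUsage({ inputTokens, outputTokens }: LanguageModelV2Usage) {
  return {
    promptTokens: inputTokens ?? NaN,
    completionTokens: outputTokens ?? NaN,
    totalTokens: (inputTokens ?? 0) + (outputTokens ?? 0),
  };
}

console.log(calculateUsage({ inputTokens: 12, outputTokens: 30 }));
// -> { promptTokens: 12, completionTokens: 30, totalTokens: 42 }
console.log(calculateUsage({ outputTokens: 30 }));
// -> { promptTokens: NaN, completionTokens: 30, totalTokens: 30 }
```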
@@ -3800,8 +3793,8 @@ function injectJsonInstruction({
3800
3793
  }
3801
3794
 
3802
3795
  // core/generate-object/output-strategy.ts
3803
- var import_provider11 = require("@ai-sdk/provider");
3804
- var import_provider_utils11 = require("@ai-sdk/provider-utils");
3796
+ var import_provider12 = require("@ai-sdk/provider");
3797
+ var import_provider_utils10 = require("@ai-sdk/provider-utils");
3805
3798
 
3806
3799
  // core/util/async-iterable-stream.ts
3807
3800
  function createAsyncIterableStream(source) {
@@ -3838,7 +3831,7 @@ var noSchemaOutputStrategy = {
3838
3831
  } : { success: true, value };
3839
3832
  },
3840
3833
  createElementStream() {
3841
- throw new import_provider11.UnsupportedFunctionalityError({
3834
+ throw new import_provider12.UnsupportedFunctionalityError({
3842
3835
  functionality: "element streams in no-schema mode"
3843
3836
  });
3844
3837
  }
@@ -3857,10 +3850,10 @@ var objectOutputStrategy = (schema) => ({
3857
3850
  };
3858
3851
  },
3859
3852
  validateFinalResult(value) {
3860
- return (0, import_provider_utils11.safeValidateTypes)({ value, schema });
3853
+ return (0, import_provider_utils10.safeValidateTypes)({ value, schema });
3861
3854
  },
3862
3855
  createElementStream() {
3863
- throw new import_provider11.UnsupportedFunctionalityError({
3856
+ throw new import_provider12.UnsupportedFunctionalityError({
3864
3857
  functionality: "element streams in object mode"
3865
3858
  });
3866
3859
  }
@@ -3883,10 +3876,10 @@ var arrayOutputStrategy = (schema) => {
3883
3876
  },
3884
3877
  validatePartialResult({ value, latestObject, isFirstDelta, isFinalDelta }) {
3885
3878
  var _a17;
3886
- if (!(0, import_provider11.isJSONObject)(value) || !(0, import_provider11.isJSONArray)(value.elements)) {
3879
+ if (!(0, import_provider12.isJSONObject)(value) || !(0, import_provider12.isJSONArray)(value.elements)) {
3887
3880
  return {
3888
3881
  success: false,
3889
- error: new import_provider11.TypeValidationError({
3882
+ error: new import_provider12.TypeValidationError({
3890
3883
  value,
3891
3884
  cause: "value must be an object that contains an array of elements"
3892
3885
  })
@@ -3896,7 +3889,7 @@ var arrayOutputStrategy = (schema) => {
3896
3889
  const resultArray = [];
3897
3890
  for (let i = 0; i < inputArray.length; i++) {
3898
3891
  const element = inputArray[i];
3899
- const result = (0, import_provider_utils11.safeValidateTypes)({ value: element, schema });
3892
+ const result = (0, import_provider_utils10.safeValidateTypes)({ value: element, schema });
3900
3893
  if (i === inputArray.length - 1 && !isFinalDelta) {
3901
3894
  continue;
3902
3895
  }
@@ -3926,10 +3919,10 @@ var arrayOutputStrategy = (schema) => {
3926
3919
  };
3927
3920
  },
3928
3921
  validateFinalResult(value) {
3929
- if (!(0, import_provider11.isJSONObject)(value) || !(0, import_provider11.isJSONArray)(value.elements)) {
3922
+ if (!(0, import_provider12.isJSONObject)(value) || !(0, import_provider12.isJSONArray)(value.elements)) {
3930
3923
  return {
3931
3924
  success: false,
3932
- error: new import_provider11.TypeValidationError({
3925
+ error: new import_provider12.TypeValidationError({
3933
3926
  value,
3934
3927
  cause: "value must be an object that contains an array of elements"
3935
3928
  })
@@ -3937,7 +3930,7 @@ var arrayOutputStrategy = (schema) => {
3937
3930
  }
3938
3931
  const inputArray = value.elements;
3939
3932
  for (const element of inputArray) {
3940
- const result = (0, import_provider_utils11.safeValidateTypes)({ value: element, schema });
3933
+ const result = (0, import_provider_utils10.safeValidateTypes)({ value: element, schema });
3941
3934
  if (!result.success) {
3942
3935
  return result;
3943
3936
  }
@@ -3992,10 +3985,10 @@ var enumOutputStrategy = (enumValues) => {
3992
3985
  additionalProperties: false
3993
3986
  },
3994
3987
  validateFinalResult(value) {
3995
- if (!(0, import_provider11.isJSONObject)(value) || typeof value.result !== "string") {
3988
+ if (!(0, import_provider12.isJSONObject)(value) || typeof value.result !== "string") {
3996
3989
  return {
3997
3990
  success: false,
3998
- error: new import_provider11.TypeValidationError({
3991
+ error: new import_provider12.TypeValidationError({
3999
3992
  value,
4000
3993
  cause: 'value must be an object that contains a string in the "result" property.'
4001
3994
  })
@@ -4004,19 +3997,19 @@ var enumOutputStrategy = (enumValues) => {
4004
3997
  const result = value.result;
4005
3998
  return enumValues.includes(result) ? { success: true, value: result } : {
4006
3999
  success: false,
4007
- error: new import_provider11.TypeValidationError({
4000
+ error: new import_provider12.TypeValidationError({
4008
4001
  value,
4009
4002
  cause: "value must be a string in the enum"
4010
4003
  })
4011
4004
  };
4012
4005
  },
4013
4006
  validatePartialResult() {
4014
- throw new import_provider11.UnsupportedFunctionalityError({
4007
+ throw new import_provider12.UnsupportedFunctionalityError({
4015
4008
  functionality: "partial results in enum mode"
4016
4009
  });
4017
4010
  },
4018
4011
  createElementStream() {
4019
- throw new import_provider11.UnsupportedFunctionalityError({
4012
+ throw new import_provider12.UnsupportedFunctionalityError({
4020
4013
  functionality: "element streams in enum mode"
4021
4014
  });
4022
4015
  }
@@ -4170,7 +4163,7 @@ function validateObjectGenerationInput({
4170
4163
  }
4171
4164
 
4172
4165
  // core/generate-object/generate-object.ts
4173
- var originalGenerateId = (0, import_provider_utils12.createIdGenerator)({ prefix: "aiobj", size: 24 });
4166
+ var originalGenerateId = (0, import_provider_utils11.createIdGenerator)({ prefix: "aiobj", size: 24 });
4174
4167
  async function generateObject({
4175
4168
  model,
4176
4169
  enum: enumValues,
@@ -4188,8 +4181,7 @@ async function generateObject({
4188
4181
  headers,
4189
4182
  experimental_repairText: repairText,
4190
4183
  experimental_telemetry: telemetry,
4191
- experimental_providerMetadata,
4192
- providerOptions = experimental_providerMetadata,
4184
+ providerOptions,
4193
4185
  _internal: {
4194
4186
  generateId: generateId3 = originalGenerateId,
4195
4187
  currentDate = () => /* @__PURE__ */ new Date()
@@ -4296,7 +4288,7 @@ async function generateObject({
4296
4288
  "gen_ai.system": model.provider,
4297
4289
  "gen_ai.request.model": model.modelId,
4298
4290
  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
4299
- "gen_ai.request.max_tokens": settings.maxTokens,
4291
+ "gen_ai.request.max_tokens": settings.maxOutputTokens,
4300
4292
  "gen_ai.request.presence_penalty": settings.presencePenalty,
4301
4293
  "gen_ai.request.temperature": settings.temperature,
4302
4294
  "gen_ai.request.top_k": settings.topK,
@@ -4344,14 +4336,15 @@ async function generateObject({
4344
4336
  "ai.response.id": responseData.id,
4345
4337
  "ai.response.model": responseData.modelId,
4346
4338
  "ai.response.timestamp": responseData.timestamp.toISOString(),
4347
- "ai.usage.promptTokens": result2.usage.promptTokens,
4348
- "ai.usage.completionTokens": result2.usage.completionTokens,
4339
+ // TODO rename telemetry attributes to inputTokens and outputTokens
4340
+ "ai.usage.promptTokens": result2.usage.inputTokens,
4341
+ "ai.usage.completionTokens": result2.usage.outputTokens,
4349
4342
  // standardized gen-ai llm span attributes:
4350
4343
  "gen_ai.response.finish_reasons": [result2.finishReason],
4351
4344
  "gen_ai.response.id": responseData.id,
4352
4345
  "gen_ai.response.model": responseData.modelId,
4353
- "gen_ai.usage.prompt_tokens": result2.usage.promptTokens,
4354
- "gen_ai.usage.completion_tokens": result2.usage.completionTokens
4346
+ "gen_ai.usage.input_tokens": result2.usage.inputTokens,
4347
+ "gen_ai.usage.output_tokens": result2.usage.outputTokens
4355
4348
  }
4356
4349
  })
4357
4350
  );
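In the telemetry hunks the legacy ai.usage.* attribute names are kept (now fed from inputTokens/outputTokens, see the added TODO), while the standardized gen_ai.usage.* attributes are renamed from prompt_tokens/completion_tokens to input_tokens/output_tokens. A sketch of the attribute snapshot an exported span now carries (values are made-up token counts):

```ts
// Hypothetical attribute snapshot for a generateObject span after this change.
const spanAttributes = {
  'ai.usage.promptTokens': 17,        // legacy key, value comes from usage.inputTokens
  'ai.usage.completionTokens': 203,   // legacy key, value comes from usage.outputTokens
  'gen_ai.usage.input_tokens': 17,    // renamed from gen_ai.usage.prompt_tokens
  'gen_ai.usage.output_tokens': 203,  // renamed from gen_ai.usage.completion_tokens
};
```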
@@ -4403,7 +4396,7 @@ async function generateObject({
4403
4396
  "gen_ai.system": model.provider,
4404
4397
  "gen_ai.request.model": model.modelId,
4405
4398
  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
4406
- "gen_ai.request.max_tokens": settings.maxTokens,
4399
+ "gen_ai.request.max_tokens": settings.maxOutputTokens,
4407
4400
  "gen_ai.request.presence_penalty": settings.presencePenalty,
4408
4401
  "gen_ai.request.temperature": settings.temperature,
4409
4402
  "gen_ai.request.top_k": settings.topK,
@@ -4455,14 +4448,15 @@ async function generateObject({
4455
4448
  "ai.response.id": responseData.id,
4456
4449
  "ai.response.model": responseData.modelId,
4457
4450
  "ai.response.timestamp": responseData.timestamp.toISOString(),
4458
- "ai.usage.promptTokens": result2.usage.promptTokens,
4459
- "ai.usage.completionTokens": result2.usage.completionTokens,
4451
+ // TODO rename telemetry attributes to inputTokens and outputTokens
4452
+ "ai.usage.promptTokens": result2.usage.inputTokens,
4453
+ "ai.usage.completionTokens": result2.usage.outputTokens,
4460
4454
  // standardized gen-ai llm span attributes:
4461
4455
  "gen_ai.response.finish_reasons": [result2.finishReason],
4462
4456
  "gen_ai.response.id": responseData.id,
4463
4457
  "gen_ai.response.model": responseData.modelId,
4464
- "gen_ai.usage.input_tokens": result2.usage.promptTokens,
4465
- "gen_ai.usage.output_tokens": result2.usage.completionTokens
4458
+ "gen_ai.usage.input_tokens": result2.usage.inputTokens,
4459
+ "gen_ai.usage.output_tokens": result2.usage.outputTokens
4466
4460
  }
4467
4461
  })
4468
4462
  );
@@ -4491,7 +4485,7 @@ async function generateObject({
4491
4485
  }
4492
4486
  }
4493
4487
  function processResult(result2) {
4494
- const parseResult = (0, import_provider_utils12.safeParseJSON)({ text: result2 });
4488
+ const parseResult = (0, import_provider_utils11.safeParseJSON)({ text: result2 });
4495
4489
  if (!parseResult.success) {
4496
4490
  throw new NoObjectGeneratedError({
4497
4491
  message: "No object generated: could not parse the response.",
@@ -4526,7 +4520,7 @@ async function generateObject({
4526
4520
  try {
4527
4521
  object2 = processResult(result);
4528
4522
  } catch (error) {
4529
- if (repairText != null && NoObjectGeneratedError.isInstance(error) && (import_provider12.JSONParseError.isInstance(error.cause) || import_provider12.TypeValidationError.isInstance(error.cause))) {
4523
+ if (repairText != null && NoObjectGeneratedError.isInstance(error) && (import_provider13.JSONParseError.isInstance(error.cause) || import_provider13.TypeValidationError.isInstance(error.cause))) {
4530
4524
  const repairedText = await repairText({
4531
4525
  text: result,
4532
4526
  error: error.cause
@@ -4547,8 +4541,9 @@ async function generateObject({
4547
4541
  "ai.response.object": {
4548
4542
  output: () => JSON.stringify(object2)
4549
4543
  },
4550
- "ai.usage.promptTokens": usage.promptTokens,
4551
- "ai.usage.completionTokens": usage.completionTokens
4544
+ // TODO rename telemetry attributes to inputTokens and outputTokens
4545
+ "ai.usage.promptTokens": usage.inputTokens,
4546
+ "ai.usage.completionTokens": usage.outputTokens
4552
4547
  }
4553
4548
  })
4554
4549
  );
@@ -4572,7 +4567,6 @@ var DefaultGenerateObjectResult = class {
4572
4567
  this.usage = options.usage;
4573
4568
  this.warnings = options.warnings;
4574
4569
  this.providerMetadata = options.providerMetadata;
4575
- this.experimental_providerMetadata = options.providerMetadata;
4576
4570
  this.response = options.response;
4577
4571
  this.request = options.request;
4578
4572
  this.logprobs = options.logprobs;
@@ -4589,7 +4583,7 @@ var DefaultGenerateObjectResult = class {
4589
4583
  };
4590
4584
 
4591
4585
  // core/generate-object/stream-object.ts
4592
- var import_provider_utils13 = require("@ai-sdk/provider-utils");
4586
+ var import_provider_utils12 = require("@ai-sdk/provider-utils");
4593
4587
 
4594
4588
  // util/delayed-promise.ts
4595
4589
  var DelayedPromise = class {
@@ -4733,7 +4727,7 @@ function now() {
4733
4727
  }
4734
4728
 
4735
4729
  // core/generate-object/stream-object.ts
4736
- var originalGenerateId2 = (0, import_provider_utils13.createIdGenerator)({ prefix: "aiobj", size: 24 });
4730
+ var originalGenerateId2 = (0, import_provider_utils12.createIdGenerator)({ prefix: "aiobj", size: 24 });
4737
4731
  function streamObject({
4738
4732
  model,
4739
4733
  schema: inputSchema,
@@ -4748,8 +4742,7 @@ function streamObject({
4748
4742
  abortSignal,
4749
4743
  headers,
4750
4744
  experimental_telemetry: telemetry,
4751
- experimental_providerMetadata,
4752
- providerOptions = experimental_providerMetadata,
4745
+ providerOptions,
4753
4746
  onError,
4754
4747
  onFinish,
4755
4748
  _internal: {
@@ -4998,7 +4991,7 @@ var DefaultStreamObjectResult = class {
4998
4991
  "gen_ai.system": model.provider,
4999
4992
  "gen_ai.request.model": model.modelId,
5000
4993
  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
5001
- "gen_ai.request.max_tokens": settings.maxTokens,
4994
+ "gen_ai.request.max_tokens": settings.maxOutputTokens,
5002
4995
  "gen_ai.request.presence_penalty": settings.presencePenalty,
5003
4996
  "gen_ai.request.temperature": settings.temperature,
5004
4997
  "gen_ai.request.top_k": settings.topK,
@@ -5186,8 +5179,7 @@ var DefaultStreamObjectResult = class {
5186
5179
  headers: response == null ? void 0 : response.headers
5187
5180
  },
5188
5181
  warnings,
5189
- providerMetadata,
5190
- experimental_providerMetadata: providerMetadata
5182
+ providerMetadata
5191
5183
  }));
5192
5184
  } catch (error2) {
5193
5185
  controller.enqueue({ type: "error", error: error2 });
@@ -5219,9 +5211,6 @@ var DefaultStreamObjectResult = class {
5219
5211
  get usage() {
5220
5212
  return this.usagePromise.value;
5221
5213
  }
5222
- get experimental_providerMetadata() {
5223
- return this.providerMetadataPromise.value;
5224
- }
5225
5214
  get providerMetadata() {
5226
5215
  return this.providerMetadataPromise.value;
5227
5216
  }
@@ -5309,39 +5298,39 @@ var DefaultStreamObjectResult = class {
5309
5298
  };
5310
5299
 
5311
5300
  // core/generate-text/generate-text.ts
5312
- var import_provider_utils15 = require("@ai-sdk/provider-utils");
5301
+ var import_provider_utils14 = require("@ai-sdk/provider-utils");
5313
5302
 
5314
5303
  // errors/no-output-specified-error.ts
5315
- var import_provider13 = require("@ai-sdk/provider");
5304
+ var import_provider14 = require("@ai-sdk/provider");
5316
5305
  var name9 = "AI_NoOutputSpecifiedError";
5317
5306
  var marker9 = `vercel.ai.error.${name9}`;
5318
5307
  var symbol9 = Symbol.for(marker9);
5319
5308
  var _a9;
5320
- var NoOutputSpecifiedError = class extends import_provider13.AISDKError {
5309
+ var NoOutputSpecifiedError = class extends import_provider14.AISDKError {
5321
5310
  // used in isInstance
5322
5311
  constructor({ message = "No output specified." } = {}) {
5323
5312
  super({ name: name9, message });
5324
5313
  this[_a9] = true;
5325
5314
  }
5326
5315
  static isInstance(error) {
5327
- return import_provider13.AISDKError.hasMarker(error, marker9);
5316
+ return import_provider14.AISDKError.hasMarker(error, marker9);
5328
5317
  }
5329
5318
  };
5330
5319
  _a9 = symbol9;
5331
5320
 
5332
5321
  // errors/tool-execution-error.ts
5333
- var import_provider14 = require("@ai-sdk/provider");
5322
+ var import_provider15 = require("@ai-sdk/provider");
5334
5323
  var name10 = "AI_ToolExecutionError";
5335
5324
  var marker10 = `vercel.ai.error.${name10}`;
5336
5325
  var symbol10 = Symbol.for(marker10);
5337
5326
  var _a10;
5338
- var ToolExecutionError = class extends import_provider14.AISDKError {
5327
+ var ToolExecutionError = class extends import_provider15.AISDKError {
5339
5328
  constructor({
5340
5329
  toolArgs,
5341
5330
  toolName,
5342
5331
  toolCallId,
5343
5332
  cause,
5344
- message = `Error executing tool ${toolName}: ${(0, import_provider14.getErrorMessage)(cause)}`
5333
+ message = `Error executing tool ${toolName}: ${(0, import_provider15.getErrorMessage)(cause)}`
5345
5334
  }) {
5346
5335
  super({ name: name10, message, cause });
5347
5336
  this[_a10] = true;
@@ -5350,7 +5339,7 @@ var ToolExecutionError = class extends import_provider14.AISDKError {
5350
5339
  this.toolCallId = toolCallId;
5351
5340
  }
5352
5341
  static isInstance(error) {
5353
- return import_provider14.AISDKError.hasMarker(error, marker10);
5342
+ return import_provider15.AISDKError.hasMarker(error, marker10);
5354
5343
  }
5355
5344
  };
5356
5345
  _a10 = symbol10;
@@ -5418,20 +5407,20 @@ function removeTextAfterLastWhitespace(text2) {
5418
5407
  }
5419
5408
 
5420
5409
  // core/generate-text/parse-tool-call.ts
5421
- var import_provider_utils14 = require("@ai-sdk/provider-utils");
5410
+ var import_provider_utils13 = require("@ai-sdk/provider-utils");
5422
5411
 
5423
5412
  // errors/invalid-tool-arguments-error.ts
5424
- var import_provider15 = require("@ai-sdk/provider");
5413
+ var import_provider16 = require("@ai-sdk/provider");
5425
5414
  var name11 = "AI_InvalidToolArgumentsError";
5426
5415
  var marker11 = `vercel.ai.error.${name11}`;
5427
5416
  var symbol11 = Symbol.for(marker11);
5428
5417
  var _a11;
5429
- var InvalidToolArgumentsError = class extends import_provider15.AISDKError {
5418
+ var InvalidToolArgumentsError = class extends import_provider16.AISDKError {
5430
5419
  constructor({
5431
5420
  toolArgs,
5432
5421
  toolName,
5433
5422
  cause,
5434
- message = `Invalid arguments for tool ${toolName}: ${(0, import_provider15.getErrorMessage)(
5423
+ message = `Invalid arguments for tool ${toolName}: ${(0, import_provider16.getErrorMessage)(
5435
5424
  cause
5436
5425
  )}`
5437
5426
  }) {
@@ -5441,18 +5430,18 @@ var InvalidToolArgumentsError = class extends import_provider15.AISDKError {
5441
5430
  this.toolName = toolName;
5442
5431
  }
5443
5432
  static isInstance(error) {
5444
- return import_provider15.AISDKError.hasMarker(error, marker11);
5433
+ return import_provider16.AISDKError.hasMarker(error, marker11);
5445
5434
  }
5446
5435
  };
5447
5436
  _a11 = symbol11;
5448
5437
 
5449
5438
  // errors/no-such-tool-error.ts
5450
- var import_provider16 = require("@ai-sdk/provider");
5439
+ var import_provider17 = require("@ai-sdk/provider");
5451
5440
  var name12 = "AI_NoSuchToolError";
5452
5441
  var marker12 = `vercel.ai.error.${name12}`;
5453
5442
  var symbol12 = Symbol.for(marker12);
5454
5443
  var _a12;
5455
- var NoSuchToolError = class extends import_provider16.AISDKError {
5444
+ var NoSuchToolError = class extends import_provider17.AISDKError {
5456
5445
  constructor({
5457
5446
  toolName,
5458
5447
  availableTools = void 0,
@@ -5464,29 +5453,29 @@ var NoSuchToolError = class extends import_provider16.AISDKError {
5464
5453
  this.availableTools = availableTools;
5465
5454
  }
5466
5455
  static isInstance(error) {
5467
- return import_provider16.AISDKError.hasMarker(error, marker12);
5456
+ return import_provider17.AISDKError.hasMarker(error, marker12);
5468
5457
  }
5469
5458
  };
5470
5459
  _a12 = symbol12;
5471
5460
 
5472
5461
  // errors/tool-call-repair-error.ts
5473
- var import_provider17 = require("@ai-sdk/provider");
5462
+ var import_provider18 = require("@ai-sdk/provider");
5474
5463
  var name13 = "AI_ToolCallRepairError";
5475
5464
  var marker13 = `vercel.ai.error.${name13}`;
5476
5465
  var symbol13 = Symbol.for(marker13);
5477
5466
  var _a13;
5478
- var ToolCallRepairError = class extends import_provider17.AISDKError {
5467
+ var ToolCallRepairError = class extends import_provider18.AISDKError {
5479
5468
  constructor({
5480
5469
  cause,
5481
5470
  originalError,
5482
- message = `Error repairing tool call: ${(0, import_provider17.getErrorMessage)(cause)}`
5471
+ message = `Error repairing tool call: ${(0, import_provider18.getErrorMessage)(cause)}`
5483
5472
  }) {
5484
5473
  super({ name: name13, message, cause });
5485
5474
  this[_a13] = true;
5486
5475
  this.originalError = originalError;
5487
5476
  }
5488
5477
  static isInstance(error) {
5489
- return import_provider17.AISDKError.hasMarker(error, marker13);
5478
+ return import_provider18.AISDKError.hasMarker(error, marker13);
5490
5479
  }
5491
5480
  };
5492
5481
  _a13 = symbol13;
@@ -5513,7 +5502,10 @@ async function parseToolCall({
5513
5502
  repairedToolCall = await repairToolCall({
5514
5503
  toolCall,
5515
5504
  tools,
5516
- parameterSchema: ({ toolName }) => asSchema(tools[toolName].parameters).jsonSchema,
5505
+ parameterSchema: ({ toolName }) => {
5506
+ const { parameters } = tools[toolName];
5507
+ return asSchema(parameters).jsonSchema;
5508
+ },
5517
5509
  system,
5518
5510
  messages,
5519
5511
  error
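The repair path above now resolves the failing tool's parameter schema inside a block body (making the tools[toolName] lookup explicit) before handing the JSON schema to the repair hook. A hedged sketch of a repair hook that receives that schema, assuming the experimental_repairToolCall option on generateText:

```ts
import { generateText, NoSuchToolError } from 'ai';

// Assumed to be defined elsewhere; shown only so the sketch is self-contained.
declare const yourModel: Parameters<typeof generateText>[0]['model'];
declare const yourTools: Parameters<typeof generateText>[0]['tools'];

const result = await generateText({
  model: yourModel,
  tools: yourTools, // tools whose `parameters` are zod schemas, as in doParseToolCall above
  prompt: 'Look up the weather in Berlin.',
  experimental_repairToolCall: async ({ toolCall, parameterSchema, error }) => {
    if (NoSuchToolError.isInstance(error)) {
      return null; // calls to unknown tools cannot be repaired
    }
    // JSON schema of the tool arguments, resolved via the lookup shown in the hunk.
    console.log(parameterSchema({ toolName: toolCall.toolName }));
    return null; // give up; a real hook could re-ask the model or fix the args here
  },
});
```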
@@ -5543,7 +5535,7 @@ async function doParseToolCall({
5543
5535
  });
5544
5536
  }
5545
5537
  const schema = asSchema(tool2.parameters);
5546
- const parseResult = toolCall.args.trim() === "" ? (0, import_provider_utils14.safeValidateTypes)({ value: {}, schema }) : (0, import_provider_utils14.safeParseJSON)({ text: toolCall.args, schema });
5538
+ const parseResult = toolCall.args.trim() === "" ? (0, import_provider_utils13.safeValidateTypes)({ value: {}, schema }) : (0, import_provider_utils13.safeParseJSON)({ text: toolCall.args, schema });
5547
5539
  if (parseResult.success === false) {
5548
5540
  throw new InvalidToolArgumentsError({
5549
5541
  toolName,
@@ -5555,7 +5547,7 @@ async function doParseToolCall({
5555
5547
  type: "tool-call",
5556
5548
  toolCallId: toolCall.toolCallId,
5557
5549
  toolName,
5558
- args: parseResult.value
5550
+ args: parseResult == null ? void 0 : parseResult.value
5559
5551
  };
5560
5552
  }
5561
5553
 
@@ -5621,11 +5613,11 @@ function toResponseMessages({
5621
5613
  }
5622
5614
 
5623
5615
  // core/generate-text/generate-text.ts
5624
- var originalGenerateId3 = (0, import_provider_utils15.createIdGenerator)({
5616
+ var originalGenerateId3 = (0, import_provider_utils14.createIdGenerator)({
5625
5617
  prefix: "aitxt",
5626
5618
  size: 24
5627
5619
  });
5628
- var originalGenerateMessageId = (0, import_provider_utils15.createIdGenerator)({
5620
+ var originalGenerateMessageId = (0, import_provider_utils14.createIdGenerator)({
5629
5621
  prefix: "msg",
5630
5622
  size: 24
5631
5623
  });
@@ -5644,8 +5636,7 @@ async function generateText({
5644
5636
  experimental_output: output,
5645
5637
  experimental_continueSteps: continueSteps = false,
5646
5638
  experimental_telemetry: telemetry,
5647
- experimental_providerMetadata,
5648
- providerOptions = experimental_providerMetadata,
5639
+ providerOptions,
5649
5640
  experimental_activeTools: activeTools,
5650
5641
  experimental_repairToolCall: repairToolCall,
5651
5642
  _internal: {
@@ -5763,7 +5754,7 @@ async function generateText({
5763
5754
  "gen_ai.system": model.provider,
5764
5755
  "gen_ai.request.model": model.modelId,
5765
5756
  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
5766
- "gen_ai.request.max_tokens": settings.maxTokens,
5757
+ "gen_ai.request.max_tokens": settings.maxOutputTokens,
5767
5758
  "gen_ai.request.presence_penalty": settings.presencePenalty,
5768
5759
  "gen_ai.request.stop_sequences": settings.stopSequences,
5769
5760
  "gen_ai.request.temperature": settings.temperature,
@@ -5805,14 +5796,15 @@ async function generateText({
5805
5796
  "ai.response.id": responseData.id,
5806
5797
  "ai.response.model": responseData.modelId,
5807
5798
  "ai.response.timestamp": responseData.timestamp.toISOString(),
5808
- "ai.usage.promptTokens": result.usage.promptTokens,
5809
- "ai.usage.completionTokens": result.usage.completionTokens,
5799
+ // TODO rename telemetry attributes to inputTokens and outputTokens
5800
+ "ai.usage.promptTokens": result.usage.inputTokens,
5801
+ "ai.usage.completionTokens": result.usage.outputTokens,
5810
5802
  // standardized gen-ai llm span attributes:
5811
5803
  "gen_ai.response.finish_reasons": [result.finishReason],
5812
5804
  "gen_ai.response.id": responseData.id,
5813
5805
  "gen_ai.response.model": responseData.modelId,
5814
- "gen_ai.usage.input_tokens": result.usage.promptTokens,
5815
- "gen_ai.usage.output_tokens": result.usage.completionTokens
5806
+ "gen_ai.usage.input_tokens": result.usage.inputTokens,
5807
+ "gen_ai.usage.output_tokens": result.usage.outputTokens
5816
5808
  }
5817
5809
  })
5818
5810
  );
@@ -5910,7 +5902,6 @@ async function generateText({
5910
5902
  messages: structuredClone(responseMessages)
5911
5903
  },
5912
5904
  providerMetadata: currentModelResponse.providerMetadata,
5913
- experimental_providerMetadata: currentModelResponse.providerMetadata,
5914
5905
  isContinued: nextStepType === "continue"
5915
5906
  };
5916
5907
  steps.push(currentStepResult);
@@ -5928,8 +5919,9 @@ async function generateText({
5928
5919
  "ai.response.toolCalls": {
5929
5920
  output: () => JSON.stringify(currentModelResponse.toolCalls)
5930
5921
  },
5931
- "ai.usage.promptTokens": currentModelResponse.usage.promptTokens,
5932
- "ai.usage.completionTokens": currentModelResponse.usage.completionTokens
5922
+ // TODO rename telemetry attributes to inputTokens and outputTokens
5923
+ "ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
5924
+ "ai.usage.completionTokens": currentModelResponse.usage.outputTokens
5933
5925
  }
5934
5926
  })
5935
5927
  );
@@ -6058,7 +6050,6 @@ var DefaultGenerateTextResult = class {
6058
6050
  this.request = options.request;
6059
6051
  this.response = options.response;
6060
6052
  this.steps = options.steps;
6061
- this.experimental_providerMetadata = options.providerMetadata;
6062
6053
  this.providerMetadata = options.providerMetadata;
6063
6054
  this.logprobs = options.logprobs;
6064
6055
  this.outputResolver = options.outputResolver;
@@ -6088,18 +6079,18 @@ __export(output_exports, {
6088
6079
  object: () => object,
6089
6080
  text: () => text
6090
6081
  });
6091
- var import_provider_utils16 = require("@ai-sdk/provider-utils");
6082
+ var import_provider_utils15 = require("@ai-sdk/provider-utils");
6092
6083
 
6093
6084
  // errors/index.ts
6094
- var import_provider20 = require("@ai-sdk/provider");
6085
+ var import_provider21 = require("@ai-sdk/provider");
6095
6086
 
6096
6087
  // errors/invalid-stream-part-error.ts
6097
- var import_provider18 = require("@ai-sdk/provider");
6088
+ var import_provider19 = require("@ai-sdk/provider");
6098
6089
  var name14 = "AI_InvalidStreamPartError";
6099
6090
  var marker14 = `vercel.ai.error.${name14}`;
6100
6091
  var symbol14 = Symbol.for(marker14);
6101
6092
  var _a14;
6102
- var InvalidStreamPartError = class extends import_provider18.AISDKError {
6093
+ var InvalidStreamPartError = class extends import_provider19.AISDKError {
6103
6094
  constructor({
6104
6095
  chunk,
6105
6096
  message
@@ -6109,18 +6100,18 @@ var InvalidStreamPartError = class extends import_provider18.AISDKError {
6109
6100
  this.chunk = chunk;
6110
6101
  }
6111
6102
  static isInstance(error) {
6112
- return import_provider18.AISDKError.hasMarker(error, marker14);
6103
+ return import_provider19.AISDKError.hasMarker(error, marker14);
6113
6104
  }
6114
6105
  };
6115
6106
  _a14 = symbol14;
6116
6107
 
6117
6108
  // errors/mcp-client-error.ts
6118
- var import_provider19 = require("@ai-sdk/provider");
6109
+ var import_provider20 = require("@ai-sdk/provider");
6119
6110
  var name15 = "AI_MCPClientError";
6120
6111
  var marker15 = `vercel.ai.error.${name15}`;
6121
6112
  var symbol15 = Symbol.for(marker15);
6122
6113
  var _a15;
6123
- var MCPClientError = class extends import_provider19.AISDKError {
6114
+ var MCPClientError = class extends import_provider20.AISDKError {
6124
6115
  constructor({
6125
6116
  name: name17 = "MCPClientError",
6126
6117
  message,
@@ -6130,7 +6121,7 @@ var MCPClientError = class extends import_provider19.AISDKError {
6130
6121
  this[_a15] = true;
6131
6122
  }
6132
6123
  static isInstance(error) {
6133
- return import_provider19.AISDKError.hasMarker(error, marker15);
6124
+ return import_provider20.AISDKError.hasMarker(error, marker15);
6134
6125
  }
6135
6126
  };
6136
6127
  _a15 = symbol15;
@@ -6184,7 +6175,7 @@ var object = ({
6184
6175
  }
6185
6176
  },
6186
6177
  parseOutput({ text: text2 }, context) {
6187
- const parseResult = (0, import_provider_utils16.safeParseJSON)({ text: text2 });
6178
+ const parseResult = (0, import_provider_utils15.safeParseJSON)({ text: text2 });
6188
6179
  if (!parseResult.success) {
6189
6180
  throw new NoObjectGeneratedError({
6190
6181
  message: "No object generated: could not parse the response.",
@@ -6195,7 +6186,7 @@ var object = ({
6195
6186
  finishReason: context.finishReason
6196
6187
  });
6197
6188
  }
6198
- const validationResult = (0, import_provider_utils16.safeValidateTypes)({
6189
+ const validationResult = (0, import_provider_utils15.safeValidateTypes)({
6199
6190
  value: parseResult.value,
6200
6191
  schema
6201
6192
  });
@@ -6215,8 +6206,8 @@ var object = ({
6215
6206
  };
6216
6207
 
6217
6208
  // core/generate-text/smooth-stream.ts
6218
- var import_provider_utils17 = require("@ai-sdk/provider-utils");
6219
- var import_provider21 = require("@ai-sdk/provider");
6209
+ var import_provider_utils16 = require("@ai-sdk/provider-utils");
6210
+ var import_provider22 = require("@ai-sdk/provider");
6220
6211
  var CHUNKING_REGEXPS = {
6221
6212
  word: /\S+\s+/m,
6222
6213
  line: /\n+/m
@@ -6224,7 +6215,7 @@ var CHUNKING_REGEXPS = {
6224
6215
  function smoothStream({
6225
6216
  delayInMs = 10,
6226
6217
  chunking = "word",
6227
- _internal: { delay: delay2 = import_provider_utils17.delay } = {}
6218
+ _internal: { delay: delay2 = import_provider_utils16.delay } = {}
6228
6219
  } = {}) {
6229
6220
  let detectChunk;
6230
6221
  if (typeof chunking === "function") {
@@ -6246,7 +6237,7 @@ function smoothStream({
6246
6237
  } else {
6247
6238
  const chunkingRegex = typeof chunking === "string" ? CHUNKING_REGEXPS[chunking] : chunking;
6248
6239
  if (chunkingRegex == null) {
6249
- throw new import_provider21.InvalidArgumentError({
6240
+ throw new import_provider22.InvalidArgumentError({
6250
6241
  argument: "chunking",
6251
6242
  message: `Chunking must be "word" or "line" or a RegExp. Received: ${chunking}`
6252
6243
  });
@@ -6284,8 +6275,8 @@ function smoothStream({
6284
6275
  }
6285
6276
 
6286
6277
  // core/generate-text/stream-text.ts
6287
- var import_provider22 = require("@ai-sdk/provider");
6288
- var import_provider_utils18 = require("@ai-sdk/provider-utils");
6278
+ var import_provider23 = require("@ai-sdk/provider");
6279
+ var import_provider_utils17 = require("@ai-sdk/provider-utils");
6289
6280
 
6290
6281
  // util/as-array.ts
6291
6282
  function asArray(value) {
@@ -6559,7 +6550,7 @@ function runToolsTransformation({
6559
6550
  finishReason: chunk.finishReason,
6560
6551
  logprobs: chunk.logprobs,
6561
6552
  usage: calculateLanguageModelUsage2(chunk.usage),
6562
- experimental_providerMetadata: chunk.providerMetadata
6553
+ providerMetadata: chunk.providerMetadata
6563
6554
  };
6564
6555
  break;
6565
6556
  }
@@ -6602,11 +6593,11 @@ function runToolsTransformation({
6602
6593
  }
6603
6594
 
6604
6595
  // core/generate-text/stream-text.ts
6605
- var originalGenerateId4 = (0, import_provider_utils18.createIdGenerator)({
6596
+ var originalGenerateId4 = (0, import_provider_utils17.createIdGenerator)({
6606
6597
  prefix: "aitxt",
6607
6598
  size: 24
6608
6599
  });
6609
- var originalGenerateMessageId2 = (0, import_provider_utils18.createIdGenerator)({
6600
+ var originalGenerateMessageId2 = (0, import_provider_utils17.createIdGenerator)({
6610
6601
  prefix: "msg",
6611
6602
  size: 24
6612
6603
  });
@@ -6625,8 +6616,7 @@ function streamText({
6625
6616
  experimental_output: output,
6626
6617
  experimental_continueSteps: continueSteps = false,
6627
6618
  experimental_telemetry: telemetry,
6628
- experimental_providerMetadata,
6629
- providerOptions = experimental_providerMetadata,
6619
+ providerOptions,
6630
6620
  experimental_toolCallStreaming = false,
6631
6621
  toolCallStreaming = experimental_toolCallStreaming,
6632
6622
  experimental_activeTools: activeTools,
@@ -6820,7 +6810,7 @@ var DefaultStreamTextResult = class {
6820
6810
  }
6821
6811
  if (part.type === "reasoning-signature") {
6822
6812
  if (activeReasoningText == null) {
6823
- throw new import_provider22.AISDKError({
6813
+ throw new import_provider23.AISDKError({
6824
6814
  name: "InvalidStreamPart",
6825
6815
  message: "reasoning-signature without reasoning"
6826
6816
  });
@@ -6887,8 +6877,7 @@ var DefaultStreamTextResult = class {
6887
6877
  ...part.response,
6888
6878
  messages: [...recordedResponse.messages, ...stepMessages]
6889
6879
  },
6890
- providerMetadata: part.experimental_providerMetadata,
6891
- experimental_providerMetadata: part.experimental_providerMetadata,
6880
+ providerMetadata: part.providerMetadata,
6892
6881
  isContinued: part.isContinued
6893
6882
  };
6894
6883
  await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
@@ -6929,9 +6918,7 @@ var DefaultStreamTextResult = class {
6929
6918
  self.responsePromise.resolve(lastStep.response);
6930
6919
  self.toolCallsPromise.resolve(lastStep.toolCalls);
6931
6920
  self.toolResultsPromise.resolve(lastStep.toolResults);
6932
- self.providerMetadataPromise.resolve(
6933
- lastStep.experimental_providerMetadata
6934
- );
6921
+ self.providerMetadataPromise.resolve(lastStep.providerMetadata);
6935
6922
  self.reasoningPromise.resolve(lastStep.reasoning);
6936
6923
  self.reasoningDetailsPromise.resolve(lastStep.reasoningDetails);
6937
6924
  const finishReason = recordedFinishReason != null ? recordedFinishReason : "unknown";
@@ -6961,7 +6948,6 @@ var DefaultStreamTextResult = class {
6961
6948
  response: lastStep.response,
6962
6949
  warnings: lastStep.warnings,
6963
6950
  providerMetadata: lastStep.providerMetadata,
6964
- experimental_providerMetadata: lastStep.experimental_providerMetadata,
6965
6951
  steps: recordedSteps
6966
6952
  }));
6967
6953
  rootSpan.setAttributes(
@@ -7105,7 +7091,7 @@ var DefaultStreamTextResult = class {
7105
7091
  "gen_ai.system": model.provider,
7106
7092
  "gen_ai.request.model": model.modelId,
7107
7093
  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
7108
- "gen_ai.request.max_tokens": settings.maxTokens,
7094
+ "gen_ai.request.max_tokens": settings.maxOutputTokens,
7109
7095
  "gen_ai.request.presence_penalty": settings.presencePenalty,
7110
7096
  "gen_ai.request.stop_sequences": settings.stopSequences,
7111
7097
  "gen_ai.request.temperature": settings.temperature,
@@ -7283,7 +7269,7 @@ var DefaultStreamTextResult = class {
7283
7269
  case "finish": {
7284
7270
  stepUsage = chunk.usage;
7285
7271
  stepFinishReason = chunk.finishReason;
7286
- stepProviderMetadata = chunk.experimental_providerMetadata;
7272
+ stepProviderMetadata = chunk.providerMetadata;
7287
7273
  stepLogProbs = chunk.logprobs;
7288
7274
  const msToFinish = now2() - startTimestampMs;
7289
7275
  doStreamSpan.addEvent("ai.stream.finish");
@@ -7375,7 +7361,6 @@ var DefaultStreamTextResult = class {
7375
7361
  finishReason: stepFinishReason,
7376
7362
  usage: stepUsage,
7377
7363
  providerMetadata: stepProviderMetadata,
7378
- experimental_providerMetadata: stepProviderMetadata,
7379
7364
  logprobs: stepLogProbs,
7380
7365
  request: stepRequest,
7381
7366
  response: {
@@ -7393,7 +7378,6 @@ var DefaultStreamTextResult = class {
7393
7378
  finishReason: stepFinishReason,
7394
7379
  usage: combinedUsage,
7395
7380
  providerMetadata: stepProviderMetadata,
7396
- experimental_providerMetadata: stepProviderMetadata,
7397
7381
  logprobs: stepLogProbs,
7398
7382
  response: {
7399
7383
  ...stepResponse,
@@ -7479,9 +7463,6 @@ var DefaultStreamTextResult = class {
7479
7463
  get finishReason() {
7480
7464
  return this.finishReasonPromise.value;
7481
7465
  }
7482
- get experimental_providerMetadata() {
7483
- return this.providerMetadataPromise.value;
7484
- }
7485
7466
  get providerMetadata() {
7486
7467
  return this.providerMetadataPromise.value;
7487
7468
  }
@@ -7832,8 +7813,8 @@ var DefaultStreamTextResult = class {
7832
7813
  };
7833
7814
 
7834
7815
  // errors/no-transcript-generated-error.ts
7835
- var import_provider23 = require("@ai-sdk/provider");
7836
- var NoTranscriptGeneratedError = class extends import_provider23.AISDKError {
7816
+ var import_provider24 = require("@ai-sdk/provider");
7817
+ var NoTranscriptGeneratedError = class extends import_provider24.AISDKError {
7837
7818
  constructor(options) {
7838
7819
  super({
7839
7820
  name: "AI_NoTranscriptGeneratedError",
@@ -8136,8 +8117,8 @@ function simulateStreamingMiddleware() {
8136
8117
  });
8137
8118
  return {
8138
8119
  stream: simulatedStream,
8139
- rawCall: result.rawCall,
8140
- rawResponse: result.response,
8120
+ request: result.request,
8121
+ response: result.response,
8141
8122
  warnings: result.warnings
8142
8123
  };
8143
8124
  }
@@ -8209,7 +8190,7 @@ function appendClientMessage({
8209
8190
  }
8210
8191
 
8211
8192
  // core/prompt/append-response-messages.ts
8212
- var import_provider24 = require("@ai-sdk/provider");
8193
+ var import_provider25 = require("@ai-sdk/provider");
8213
8194
  function appendResponseMessages({
8214
8195
  messages,
8215
8196
  responseMessages,
@@ -8292,7 +8273,7 @@ function appendResponseMessages({
8292
8273
  break;
8293
8274
  case "file":
8294
8275
  if (part.data instanceof URL) {
8295
- throw new import_provider24.AISDKError({
8276
+ throw new import_provider25.AISDKError({
8296
8277
  name: "InvalidAssistantFileData",
8297
8278
  message: "File data cannot be a URL"
8298
8279
  });
@@ -8386,7 +8367,7 @@ function appendResponseMessages({
8386
8367
  }
8387
8368
 
8388
8369
  // core/registry/custom-provider.ts
8389
- var import_provider25 = require("@ai-sdk/provider");
8370
+ var import_provider26 = require("@ai-sdk/provider");
8390
8371
  function customProvider({
8391
8372
  languageModels,
8392
8373
  textEmbeddingModels,
@@ -8401,7 +8382,7 @@ function customProvider({
8401
8382
  if (fallbackProvider) {
8402
8383
  return fallbackProvider.languageModel(modelId);
8403
8384
  }
8404
- throw new import_provider25.NoSuchModelError({ modelId, modelType: "languageModel" });
8385
+ throw new import_provider26.NoSuchModelError({ modelId, modelType: "languageModel" });
8405
8386
  },
8406
8387
  textEmbeddingModel(modelId) {
8407
8388
  if (textEmbeddingModels != null && modelId in textEmbeddingModels) {
@@ -8410,7 +8391,7 @@ function customProvider({
8410
8391
  if (fallbackProvider) {
8411
8392
  return fallbackProvider.textEmbeddingModel(modelId);
8412
8393
  }
8413
- throw new import_provider25.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
8394
+ throw new import_provider26.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
8414
8395
  },
8415
8396
  imageModel(modelId) {
8416
8397
  if (imageModels != null && modelId in imageModels) {
@@ -8419,19 +8400,19 @@ function customProvider({
8419
8400
  if (fallbackProvider == null ? void 0 : fallbackProvider.imageModel) {
8420
8401
  return fallbackProvider.imageModel(modelId);
8421
8402
  }
8422
- throw new import_provider25.NoSuchModelError({ modelId, modelType: "imageModel" });
8403
+ throw new import_provider26.NoSuchModelError({ modelId, modelType: "imageModel" });
8423
8404
  }
8424
8405
  };
8425
8406
  }
8426
8407
  var experimental_customProvider = customProvider;
8427
8408
 
8428
8409
  // core/registry/no-such-provider-error.ts
8429
- var import_provider26 = require("@ai-sdk/provider");
8410
+ var import_provider27 = require("@ai-sdk/provider");
8430
8411
  var name16 = "AI_NoSuchProviderError";
8431
8412
  var marker16 = `vercel.ai.error.${name16}`;
8432
8413
  var symbol16 = Symbol.for(marker16);
8433
8414
  var _a16;
8434
- var NoSuchProviderError = class extends import_provider26.NoSuchModelError {
8415
+ var NoSuchProviderError = class extends import_provider27.NoSuchModelError {
8435
8416
  constructor({
8436
8417
  modelId,
8437
8418
  modelType,
@@ -8445,13 +8426,13 @@ var NoSuchProviderError = class extends import_provider26.NoSuchModelError {
8445
8426
  this.availableProviders = availableProviders;
8446
8427
  }
8447
8428
  static isInstance(error) {
8448
- return import_provider26.AISDKError.hasMarker(error, marker16);
8429
+ return import_provider27.AISDKError.hasMarker(error, marker16);
8449
8430
  }
8450
8431
  };
8451
8432
  _a16 = symbol16;
8452
8433
 
8453
8434
  // core/registry/provider-registry.ts
8454
- var import_provider27 = require("@ai-sdk/provider");
8435
+ var import_provider28 = require("@ai-sdk/provider");
8455
8436
  function createProviderRegistry(providers, {
8456
8437
  separator = ":"
8457
8438
  } = {}) {
@@ -8490,7 +8471,7 @@ var DefaultProviderRegistry = class {
8490
8471
  splitId(id, modelType) {
8491
8472
  const index = id.indexOf(this.separator);
8492
8473
  if (index === -1) {
8493
- throw new import_provider27.NoSuchModelError({
8474
+ throw new import_provider28.NoSuchModelError({
8494
8475
  modelId: id,
8495
8476
  modelType,
8496
8477
  message: `Invalid ${modelType} id for registry: ${id} (must be in the format "providerId${this.separator}modelId")`
@@ -8503,7 +8484,7 @@ var DefaultProviderRegistry = class {
8503
8484
  const [providerId, modelId] = this.splitId(id, "languageModel");
8504
8485
  const model = (_b = (_a17 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a17, modelId);
8505
8486
  if (model == null) {
8506
- throw new import_provider27.NoSuchModelError({ modelId: id, modelType: "languageModel" });
8487
+ throw new import_provider28.NoSuchModelError({ modelId: id, modelType: "languageModel" });
8507
8488
  }
8508
8489
  return model;
8509
8490
  }
@@ -8513,7 +8494,7 @@ var DefaultProviderRegistry = class {
8513
8494
  const provider = this.getProvider(providerId);
8514
8495
  const model = (_a17 = provider.textEmbeddingModel) == null ? void 0 : _a17.call(provider, modelId);
8515
8496
  if (model == null) {
8516
- throw new import_provider27.NoSuchModelError({
8497
+ throw new import_provider28.NoSuchModelError({
8517
8498
  modelId: id,
8518
8499
  modelType: "textEmbeddingModel"
8519
8500
  });
@@ -8526,7 +8507,7 @@ var DefaultProviderRegistry = class {
8526
8507
  const provider = this.getProvider(providerId);
8527
8508
  const model = (_a17 = provider.imageModel) == null ? void 0 : _a17.call(provider, modelId);
8528
8509
  if (model == null) {
8529
- throw new import_provider27.NoSuchModelError({ modelId: id, modelType: "imageModel" });
8510
+ throw new import_provider28.NoSuchModelError({ modelId: id, modelType: "imageModel" });
8530
8511
  }
8531
8512
  return model;
8532
8513
  }
@@ -8538,7 +8519,7 @@ function tool(tool2) {
8538
8519
  }
8539
8520
 
8540
8521
  // core/tool/mcp/mcp-sse-transport.ts
8541
- var import_provider_utils19 = require("@ai-sdk/provider-utils");
8522
+ var import_provider_utils18 = require("@ai-sdk/provider-utils");
8542
8523
 
8543
8524
  // core/tool/mcp/json-rpc-message.ts
8544
8525
  var import_zod9 = require("zod");
@@ -8709,7 +8690,7 @@ var SseMCPTransport = class {
8709
8690
  (_b = this.onerror) == null ? void 0 : _b.call(this, error);
8710
8691
  return reject(error);
8711
8692
  }
8712
- const stream = response.body.pipeThrough(new TextDecoderStream()).pipeThrough((0, import_provider_utils19.createEventSourceParserStream)());
8693
+ const stream = response.body.pipeThrough(new TextDecoderStream()).pipeThrough((0, import_provider_utils18.createEventSourceParserStream)());
8713
8694
  const reader = stream.getReader();
8714
8695
  const processEvents = async () => {
8715
8696
  var _a18, _b2, _c2;
@@ -9016,6 +8997,7 @@ var MCPClient = class {
9016
8997
  async tools({
9017
8998
  schemas = "automatic"
9018
8999
  } = {}) {
9000
+ var _a17;
9019
9001
  const tools = {};
9020
9002
  try {
9021
9003
  const listToolsResult = await this.listTools();
@@ -9023,14 +9005,18 @@ var MCPClient = class {
9023
9005
  if (schemas !== "automatic" && !(name17 in schemas)) {
9024
9006
  continue;
9025
9007
  }
9026
- const parameters = schemas === "automatic" ? jsonSchema(inputSchema) : schemas[name17].parameters;
9008
+ const parameters = schemas === "automatic" ? jsonSchema({
9009
+ ...inputSchema,
9010
+ properties: (_a17 = inputSchema.properties) != null ? _a17 : {},
9011
+ additionalProperties: false
9012
+ }) : schemas[name17].parameters;
9027
9013
  const self = this;
9028
9014
  const toolWithExecute = tool({
9029
9015
  description,
9030
9016
  parameters,
9031
9017
  execute: async (args, options) => {
9032
- var _a17;
9033
- (_a17 = options == null ? void 0 : options.abortSignal) == null ? void 0 : _a17.throwIfAborted();
9018
+ var _a18;
9019
+ (_a18 = options == null ? void 0 : options.abortSignal) == null ? void 0 : _a18.throwIfAborted();
9034
9020
  return self.callTool({
9035
9021
  name: name17,
9036
9022
  args,
@@ -9116,7 +9102,7 @@ function cosineSimilarity(vector1, vector2, options) {
9116
9102
  }
9117
9103
 
9118
9104
  // core/util/simulate-readable-stream.ts
9119
- var import_provider_utils20 = require("@ai-sdk/provider-utils");
9105
+ var import_provider_utils19 = require("@ai-sdk/provider-utils");
9120
9106
  function simulateReadableStream({
9121
9107
  chunks,
9122
9108
  initialDelayInMs = 0,
@@ -9124,7 +9110,7 @@ function simulateReadableStream({
9124
9110
  _internal
9125
9111
  }) {
9126
9112
  var _a17;
9127
- const delay2 = (_a17 = _internal == null ? void 0 : _internal.delay) != null ? _a17 : import_provider_utils20.delay;
9113
+ const delay2 = (_a17 = _internal == null ? void 0 : _internal.delay) != null ? _a17 : import_provider_utils19.delay;
9128
9114
  let index = 0;
9129
9115
  return new ReadableStream({
9130
9116
  async pull(controller) {
@@ -9251,10 +9237,10 @@ __export(llamaindex_adapter_exports, {
9251
9237
  toDataStream: () => toDataStream2,
9252
9238
  toDataStreamResponse: () => toDataStreamResponse2
9253
9239
  });
9254
- var import_provider_utils22 = require("@ai-sdk/provider-utils");
9240
+ var import_provider_utils21 = require("@ai-sdk/provider-utils");
9255
9241
  function toDataStreamInternal2(stream, callbacks) {
9256
9242
  const trimStart = trimStartOfStream();
9257
- return (0, import_provider_utils22.convertAsyncIteratorToReadableStream)(stream[Symbol.asyncIterator]()).pipeThrough(
9243
+ return (0, import_provider_utils21.convertAsyncIteratorToReadableStream)(stream[Symbol.asyncIterator]()).pipeThrough(
9258
9244
  new TransformStream({
9259
9245
  async transform(message, controller) {
9260
9246
  controller.enqueue(trimStart(message.delta));