ai 5.0.0-canary.6 → 5.0.0-canary.8

This diff compares the publicly available contents of these two package versions as they appear in their public registry. It is provided for informational purposes only.
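Most of this diff is a coordinated API rename on the embedding and generation paths: embed and embedMany gain a providerOptions passthrough to model.doEmbed and expose response/responses in place of rawResponse, asSchema now tolerates a null schema, maxTokens becomes maxOutputTokens, model-level usage moves from promptTokens/completionTokens to inputTokens/outputTokens, and stream chunks switch from { type: "text-delta", textDelta } to { type: "text", text }. A minimal TypeScript sketch of an embed() call after this change, assuming the public export mirrors the internal signature shown below (the provider import and model id are illustrative, not taken from this diff):

import { embed } from "ai";
import { openai } from "@ai-sdk/openai"; // illustrative provider, not part of this diff

const { embedding, usage, response } = await embed({
  model: openai.embedding("text-embedding-3-small"), // hypothetical model choice
  value: "sunny day at the beach",
  providerOptions: {}, // new in canary.8: forwarded to model.doEmbed()
});
// canary.6 exposed `rawResponse` on the result; canary.8 exposes `response` instead.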
package/dist/index.mjs CHANGED
@@ -1422,7 +1422,10 @@ function isSchema(value) {
1422
1422
  return typeof value === "object" && value !== null && schemaSymbol in value && value[schemaSymbol] === true && "jsonSchema" in value && "validate" in value;
1423
1423
  }
1424
1424
  function asSchema(schema) {
1425
- return isSchema(schema) ? schema : zodSchema(schema);
1425
+ return schema == null ? jsonSchema({
1426
+ properties: {},
1427
+ additionalProperties: false
1428
+ }) : isSchema(schema) ? schema : zodSchema(schema);
1426
1429
  }
1427
1430
 
1428
1431
  // core/util/should-resubmit-messages.ts
@@ -1990,6 +1993,7 @@ function selectTelemetryAttributes({
1990
1993
  async function embed({
1991
1994
  model,
1992
1995
  value,
1996
+ providerOptions,
1993
1997
  maxRetries: maxRetriesArg,
1994
1998
  abortSignal,
1995
1999
  headers,
@@ -2015,7 +2019,7 @@ async function embed({
2015
2019
  }),
2016
2020
  tracer,
2017
2021
  fn: async (span) => {
2018
- const { embedding, usage, rawResponse } = await retry(
2022
+ const { embedding, usage, response } = await retry(
2019
2023
  () => (
2020
2024
  // nested spans to align with the embedMany telemetry data:
2021
2025
  recordSpan({
@@ -2038,7 +2042,8 @@ async function embed({
2038
2042
  const modelResponse = await model.doEmbed({
2039
2043
  values: [value],
2040
2044
  abortSignal,
2041
- headers
2045
+ headers,
2046
+ providerOptions
2042
2047
  });
2043
2048
  const embedding2 = modelResponse.embeddings[0];
2044
2049
  const usage2 = (_a17 = modelResponse.usage) != null ? _a17 : { tokens: NaN };
@@ -2058,7 +2063,7 @@ async function embed({
2058
2063
  return {
2059
2064
  embedding: embedding2,
2060
2065
  usage: usage2,
2061
- rawResponse: modelResponse.rawResponse
2066
+ response: modelResponse.response
2062
2067
  };
2063
2068
  }
2064
2069
  })
@@ -2073,7 +2078,12 @@ async function embed({
2073
2078
  }
2074
2079
  })
2075
2080
  );
2076
- return new DefaultEmbedResult({ value, embedding, usage, rawResponse });
2081
+ return new DefaultEmbedResult({
2082
+ value,
2083
+ embedding,
2084
+ usage,
2085
+ response
2086
+ });
2077
2087
  }
2078
2088
  });
2079
2089
  }
@@ -2082,7 +2092,7 @@ var DefaultEmbedResult = class {
2082
2092
  this.value = options.value;
2083
2093
  this.embedding = options.embedding;
2084
2094
  this.usage = options.usage;
2085
- this.rawResponse = options.rawResponse;
2095
+ this.response = options.response;
2086
2096
  }
2087
2097
  };
2088
2098
 
@@ -2105,6 +2115,7 @@ async function embedMany({
2105
2115
  maxRetries: maxRetriesArg,
2106
2116
  abortSignal,
2107
2117
  headers,
2118
+ providerOptions,
2108
2119
  experimental_telemetry: telemetry
2109
2120
  }) {
2110
2121
  const { maxRetries, retry } = prepareRetries({ maxRetries: maxRetriesArg });
@@ -2132,7 +2143,7 @@ async function embedMany({
2132
2143
  fn: async (span) => {
2133
2144
  const maxEmbeddingsPerCall = model.maxEmbeddingsPerCall;
2134
2145
  if (maxEmbeddingsPerCall == null) {
2135
- const { embeddings: embeddings2, usage } = await retry(() => {
2146
+ const { embeddings: embeddings2, usage, response } = await retry(() => {
2136
2147
  return recordSpan({
2137
2148
  name: "ai.embedMany.doEmbed",
2138
2149
  attributes: selectTelemetryAttributes({
@@ -2155,7 +2166,8 @@ async function embedMany({
2155
2166
  const modelResponse = await model.doEmbed({
2156
2167
  values,
2157
2168
  abortSignal,
2158
- headers
2169
+ headers,
2170
+ providerOptions
2159
2171
  });
2160
2172
  const embeddings3 = modelResponse.embeddings;
2161
2173
  const usage2 = (_a17 = modelResponse.usage) != null ? _a17 : { tokens: NaN };
@@ -2170,7 +2182,11 @@ async function embedMany({
2170
2182
  }
2171
2183
  })
2172
2184
  );
2173
- return { embeddings: embeddings3, usage: usage2 };
2185
+ return {
2186
+ embeddings: embeddings3,
2187
+ usage: usage2,
2188
+ response: modelResponse.response
2189
+ };
2174
2190
  }
2175
2191
  });
2176
2192
  });
@@ -2185,13 +2201,23 @@ async function embedMany({
2185
2201
  }
2186
2202
  })
2187
2203
  );
2188
- return new DefaultEmbedManyResult({ values, embeddings: embeddings2, usage });
2204
+ return new DefaultEmbedManyResult({
2205
+ values,
2206
+ embeddings: embeddings2,
2207
+ usage,
2208
+ responses: [response]
2209
+ });
2189
2210
  }
2190
2211
  const valueChunks = splitArray(values, maxEmbeddingsPerCall);
2191
2212
  const embeddings = [];
2213
+ const responses = [];
2192
2214
  let tokens = 0;
2193
2215
  for (const chunk of valueChunks) {
2194
- const { embeddings: responseEmbeddings, usage } = await retry(() => {
2216
+ const {
2217
+ embeddings: responseEmbeddings,
2218
+ usage,
2219
+ response
2220
+ } = await retry(() => {
2195
2221
  return recordSpan({
2196
2222
  name: "ai.embedMany.doEmbed",
2197
2223
  attributes: selectTelemetryAttributes({
@@ -2214,7 +2240,8 @@ async function embedMany({
2214
2240
  const modelResponse = await model.doEmbed({
2215
2241
  values: chunk,
2216
2242
  abortSignal,
2217
- headers
2243
+ headers,
2244
+ providerOptions
2218
2245
  });
2219
2246
  const embeddings2 = modelResponse.embeddings;
2220
2247
  const usage2 = (_a17 = modelResponse.usage) != null ? _a17 : { tokens: NaN };
@@ -2229,11 +2256,16 @@ async function embedMany({
2229
2256
  }
2230
2257
  })
2231
2258
  );
2232
- return { embeddings: embeddings2, usage: usage2 };
2259
+ return {
2260
+ embeddings: embeddings2,
2261
+ usage: usage2,
2262
+ response: modelResponse.response
2263
+ };
2233
2264
  }
2234
2265
  });
2235
2266
  });
2236
2267
  embeddings.push(...responseEmbeddings);
2268
+ responses.push(response);
2237
2269
  tokens += usage.tokens;
2238
2270
  }
2239
2271
  span.setAttributes(
@@ -2250,7 +2282,8 @@ async function embedMany({
2250
2282
  return new DefaultEmbedManyResult({
2251
2283
  values,
2252
2284
  embeddings,
2253
- usage: { tokens }
2285
+ usage: { tokens },
2286
+ responses
2254
2287
  });
2255
2288
  }
2256
2289
  });
@@ -2260,6 +2293,7 @@ var DefaultEmbedManyResult = class {
2260
2293
  this.values = options.values;
2261
2294
  this.embeddings = options.embeddings;
2262
2295
  this.usage = options.usage;
2296
+ this.responses = options.responses;
2263
2297
  }
2264
2298
  };
2265
2299
 
@@ -2552,9 +2586,6 @@ var NoObjectGeneratedError = class extends AISDKError4 {
2552
2586
  };
2553
2587
  _a4 = symbol4;
2554
2588
 
2555
- // core/prompt/convert-to-language-model-prompt.ts
2556
- import { convertUint8ArrayToBase64 as convertUint8ArrayToBase643 } from "@ai-sdk/provider-utils";
2557
-
2558
2589
  // util/download-error.ts
2559
2590
  import { AISDKError as AISDKError5 } from "@ai-sdk/provider";
2560
2591
  var name5 = "AI_DownloadError";
@@ -2607,10 +2638,12 @@ async function download({ url }) {
2607
2638
  }
2608
2639
 
2609
2640
  // core/prompt/data-content.ts
2641
+ import { AISDKError as AISDKError7 } from "@ai-sdk/provider";
2610
2642
  import {
2611
2643
  convertBase64ToUint8Array as convertBase64ToUint8Array2,
2612
2644
  convertUint8ArrayToBase64 as convertUint8ArrayToBase642
2613
2645
  } from "@ai-sdk/provider-utils";
2646
+ import { z } from "zod";
2614
2647
 
2615
2648
  // core/prompt/invalid-data-content-error.ts
2616
2649
  import { AISDKError as AISDKError6 } from "@ai-sdk/provider";
@@ -2634,8 +2667,23 @@ var InvalidDataContentError = class extends AISDKError6 {
2634
2667
  };
2635
2668
  _a6 = symbol6;
2636
2669
 
2670
+ // core/prompt/split-data-url.ts
2671
+ function splitDataUrl(dataUrl) {
2672
+ try {
2673
+ const [header, base64Content] = dataUrl.split(",");
2674
+ return {
2675
+ mediaType: header.split(";")[0].split(":")[1],
2676
+ base64Content
2677
+ };
2678
+ } catch (error) {
2679
+ return {
2680
+ mediaType: void 0,
2681
+ base64Content: void 0
2682
+ };
2683
+ }
2684
+ }
2685
+
2637
2686
  // core/prompt/data-content.ts
2638
- import { z } from "zod";
2639
2687
  var dataContentSchema = z.union([
2640
2688
  z.string(),
2641
2689
  z.instanceof(Uint8Array),
@@ -2649,6 +2697,33 @@ var dataContentSchema = z.union([
2649
2697
  { message: "Must be a Buffer" }
2650
2698
  )
2651
2699
  ]);
2700
+ function convertToLanguageModelV2DataContent(content) {
2701
+ if (content instanceof Uint8Array) {
2702
+ return { data: content, mediaType: void 0 };
2703
+ }
2704
+ if (content instanceof ArrayBuffer) {
2705
+ return { data: new Uint8Array(content), mediaType: void 0 };
2706
+ }
2707
+ if (typeof content === "string") {
2708
+ try {
2709
+ content = new URL(content);
2710
+ } catch (error) {
2711
+ }
2712
+ }
2713
+ if (content instanceof URL && content.protocol === "data:") {
2714
+ const { mediaType: dataUrlMediaType, base64Content } = splitDataUrl(
2715
+ content.toString()
2716
+ );
2717
+ if (dataUrlMediaType == null || base64Content == null) {
2718
+ throw new AISDKError7({
2719
+ name: "InvalidDataContentError",
2720
+ message: `Invalid data URL format in content ${content.toString()}`
2721
+ });
2722
+ }
2723
+ return { data: base64Content, mediaType: dataUrlMediaType };
2724
+ }
2725
+ return { data: content, mediaType: void 0 };
2726
+ }
2652
2727
  function convertDataContentToBase64String(content) {
2653
2728
  if (typeof content === "string") {
2654
2729
  return content;
@@ -2687,12 +2762,12 @@ function convertUint8ArrayToText(uint8Array) {
2687
2762
  }
2688
2763
 
2689
2764
  // core/prompt/invalid-message-role-error.ts
2690
- import { AISDKError as AISDKError7 } from "@ai-sdk/provider";
2765
+ import { AISDKError as AISDKError8 } from "@ai-sdk/provider";
2691
2766
  var name7 = "AI_InvalidMessageRoleError";
2692
2767
  var marker7 = `vercel.ai.error.${name7}`;
2693
2768
  var symbol7 = Symbol.for(marker7);
2694
2769
  var _a7;
2695
- var InvalidMessageRoleError = class extends AISDKError7 {
2770
+ var InvalidMessageRoleError = class extends AISDKError8 {
2696
2771
  constructor({
2697
2772
  role,
2698
2773
  message = `Invalid message role: '${role}'. Must be one of: "system", "user", "assistant", "tool".`
@@ -2702,27 +2777,11 @@ var InvalidMessageRoleError = class extends AISDKError7 {
2702
2777
  this.role = role;
2703
2778
  }
2704
2779
  static isInstance(error) {
2705
- return AISDKError7.hasMarker(error, marker7);
2780
+ return AISDKError8.hasMarker(error, marker7);
2706
2781
  }
2707
2782
  };
2708
2783
  _a7 = symbol7;
2709
2784
 
2710
- // core/prompt/split-data-url.ts
2711
- function splitDataUrl(dataUrl) {
2712
- try {
2713
- const [header, base64Content] = dataUrl.split(",");
2714
- return {
2715
- mediaType: header.split(";")[0].split(":")[1],
2716
- base64Content
2717
- };
2718
- } catch (error) {
2719
- return {
2720
- mediaType: void 0,
2721
- base64Content: void 0
2722
- };
2723
- }
2724
- }
2725
-
2726
2785
  // core/prompt/convert-to-language-model-prompt.ts
2727
2786
  async function convertToLanguageModelPrompt({
2728
2787
  prompt,
@@ -2744,14 +2803,13 @@ async function convertToLanguageModelPrompt({
2744
2803
  ];
2745
2804
  }
2746
2805
  function convertToLanguageModelMessage(message, downloadedAssets) {
2747
- var _a17, _b, _c, _d, _e, _f;
2748
2806
  const role = message.role;
2749
2807
  switch (role) {
2750
2808
  case "system": {
2751
2809
  return {
2752
2810
  role: "system",
2753
2811
  content: message.content,
2754
- providerOptions: (_a17 = message.providerOptions) != null ? _a17 : message.experimental_providerMetadata
2812
+ providerOptions: message.providerOptions
2755
2813
  };
2756
2814
  }
2757
2815
  case "user": {
@@ -2759,13 +2817,13 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
2759
2817
  return {
2760
2818
  role: "user",
2761
2819
  content: [{ type: "text", text: message.content }],
2762
- providerOptions: (_b = message.providerOptions) != null ? _b : message.experimental_providerMetadata
2820
+ providerOptions: message.providerOptions
2763
2821
  };
2764
2822
  }
2765
2823
  return {
2766
2824
  role: "user",
2767
2825
  content: message.content.map((part) => convertPartToLanguageModelPart(part, downloadedAssets)).filter((part) => part.type !== "text" || part.text !== ""),
2768
- providerOptions: (_c = message.providerOptions) != null ? _c : message.experimental_providerMetadata
2826
+ providerOptions: message.providerOptions
2769
2827
  };
2770
2828
  }
2771
2829
  case "assistant": {
@@ -2773,7 +2831,7 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
2773
2831
  return {
2774
2832
  role: "assistant",
2775
2833
  content: [{ type: "text", text: message.content }],
2776
- providerOptions: (_d = message.providerOptions) != null ? _d : message.experimental_providerMetadata
2834
+ providerOptions: message.providerOptions
2777
2835
  };
2778
2836
  }
2779
2837
  return {
@@ -2782,15 +2840,18 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
2782
2840
  // remove empty text parts:
2783
2841
  (part) => part.type !== "text" || part.text !== ""
2784
2842
  ).map((part) => {
2785
- var _a18, _b2;
2786
- const providerOptions = (_a18 = part.providerOptions) != null ? _a18 : part.experimental_providerMetadata;
2843
+ var _a17;
2844
+ const providerOptions = part.providerOptions;
2787
2845
  switch (part.type) {
2788
2846
  case "file": {
2847
+ const { data, mediaType } = convertToLanguageModelV2DataContent(
2848
+ part.data
2849
+ );
2789
2850
  return {
2790
2851
  type: "file",
2791
- data: part.data instanceof URL ? part.data : convertDataContentToBase64String(part.data),
2852
+ data,
2792
2853
  filename: part.filename,
2793
- mediaType: (_b2 = part.mediaType) != null ? _b2 : part.mimeType,
2854
+ mediaType: (_a17 = mediaType != null ? mediaType : part.mediaType) != null ? _a17 : part.mimeType,
2794
2855
  providerOptions
2795
2856
  };
2796
2857
  }
@@ -2827,25 +2888,22 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
2827
2888
  }
2828
2889
  }
2829
2890
  }),
2830
- providerOptions: (_e = message.providerOptions) != null ? _e : message.experimental_providerMetadata
2891
+ providerOptions: message.providerOptions
2831
2892
  };
2832
2893
  }
2833
2894
  case "tool": {
2834
2895
  return {
2835
2896
  role: "tool",
2836
- content: message.content.map((part) => {
2837
- var _a18;
2838
- return {
2839
- type: "tool-result",
2840
- toolCallId: part.toolCallId,
2841
- toolName: part.toolName,
2842
- result: part.result,
2843
- content: part.experimental_content,
2844
- isError: part.isError,
2845
- providerOptions: (_a18 = part.providerOptions) != null ? _a18 : part.experimental_providerMetadata
2846
- };
2847
- }),
2848
- providerOptions: (_f = message.providerOptions) != null ? _f : message.experimental_providerMetadata
2897
+ content: message.content.map((part) => ({
2898
+ type: "tool-result",
2899
+ toolCallId: part.toolCallId,
2900
+ toolName: part.toolName,
2901
+ result: part.result,
2902
+ content: part.experimental_content,
2903
+ isError: part.isError,
2904
+ providerOptions: part.providerOptions
2905
+ })),
2906
+ providerOptions: message.providerOptions
2849
2907
  };
2850
2908
  }
2851
2909
  default: {
@@ -2878,71 +2936,48 @@ async function downloadAssets(messages, downloadImplementation, modelSupportsIma
2878
2936
  );
2879
2937
  }
2880
2938
  function convertPartToLanguageModelPart(part, downloadedAssets) {
2881
- var _a17, _b, _c, _d, _e;
2939
+ var _a17, _b, _c;
2882
2940
  if (part.type === "text") {
2883
2941
  return {
2884
2942
  type: "text",
2885
2943
  text: part.text,
2886
- providerOptions: (_a17 = part.providerOptions) != null ? _a17 : part.experimental_providerMetadata
2944
+ providerOptions: part.providerOptions
2887
2945
  };
2888
2946
  }
2889
- let mediaType = (_b = part.mediaType) != null ? _b : part.mimeType;
2890
- let data;
2891
- let content;
2892
- let normalizedData;
2947
+ let originalData;
2893
2948
  const type = part.type;
2894
2949
  switch (type) {
2895
2950
  case "image":
2896
- data = part.image;
2951
+ originalData = part.image;
2897
2952
  break;
2898
2953
  case "file":
2899
- data = part.data;
2954
+ originalData = part.data;
2900
2955
  break;
2901
2956
  default:
2902
2957
  throw new Error(`Unsupported part type: ${type}`);
2903
2958
  }
2904
- try {
2905
- content = typeof data === "string" ? new URL(data) : data;
2906
- } catch (error) {
2907
- content = data;
2908
- }
2909
- if (content instanceof URL) {
2910
- if (content.protocol === "data:") {
2911
- const { mediaType: dataUrlMediaType, base64Content } = splitDataUrl(
2912
- content.toString()
2913
- );
2914
- if (dataUrlMediaType == null || base64Content == null) {
2915
- throw new Error(`Invalid data URL format in part ${type}`);
2916
- }
2917
- mediaType = dataUrlMediaType;
2918
- normalizedData = convertDataContentToUint8Array(base64Content);
2919
- } else {
2920
- const downloadedFile = downloadedAssets[content.toString()];
2921
- if (downloadedFile) {
2922
- normalizedData = downloadedFile.data;
2923
- mediaType != null ? mediaType : mediaType = downloadedFile.mediaType;
2924
- } else {
2925
- normalizedData = content;
2926
- }
2959
+ const { data: convertedData, mediaType: convertedMediaType } = convertToLanguageModelV2DataContent(originalData);
2960
+ let mediaType = (_a17 = convertedMediaType != null ? convertedMediaType : part.mediaType) != null ? _a17 : part.mimeType;
2961
+ let data = convertedData;
2962
+ if (data instanceof URL) {
2963
+ const downloadedFile = downloadedAssets[data.toString()];
2964
+ if (downloadedFile) {
2965
+ data = downloadedFile.data;
2966
+ mediaType = (_b = downloadedFile.mediaType) != null ? _b : mediaType;
2927
2967
  }
2928
- } else {
2929
- normalizedData = convertDataContentToUint8Array(content);
2930
2968
  }
2931
2969
  switch (type) {
2932
2970
  case "image": {
2933
- if (normalizedData instanceof Uint8Array) {
2934
- mediaType = (_c = detectMediaType({
2935
- data: normalizedData,
2936
- signatures: imageMediaTypeSignatures
2937
- })) != null ? _c : mediaType;
2971
+ if (data instanceof Uint8Array || typeof data === "string") {
2972
+ mediaType = (_c = detectMediaType({ data, signatures: imageMediaTypeSignatures })) != null ? _c : mediaType;
2938
2973
  }
2939
2974
  return {
2940
2975
  type: "file",
2941
2976
  mediaType: mediaType != null ? mediaType : "image/*",
2942
2977
  // any image
2943
2978
  filename: void 0,
2944
- data: normalizedData instanceof Uint8Array ? convertUint8ArrayToBase643(normalizedData) : normalizedData,
2945
- providerOptions: (_d = part.providerOptions) != null ? _d : part.experimental_providerMetadata
2979
+ data,
2980
+ providerOptions: part.providerOptions
2946
2981
  };
2947
2982
  }
2948
2983
  case "file": {
@@ -2953,8 +2988,8 @@ function convertPartToLanguageModelPart(part, downloadedAssets) {
2953
2988
  type: "file",
2954
2989
  mediaType,
2955
2990
  filename: part.filename,
2956
- data: normalizedData instanceof Uint8Array ? convertDataContentToBase64String(normalizedData) : normalizedData,
2957
- providerOptions: (_e = part.providerOptions) != null ? _e : part.experimental_providerMetadata
2991
+ data,
2992
+ providerOptions: part.providerOptions
2958
2993
  };
2959
2994
  }
2960
2995
  }
@@ -2962,7 +2997,7 @@ function convertPartToLanguageModelPart(part, downloadedAssets) {
2962
2997
 
2963
2998
  // core/prompt/prepare-call-settings.ts
2964
2999
  function prepareCallSettings({
2965
- maxTokens,
3000
+ maxOutputTokens,
2966
3001
  temperature,
2967
3002
  topP,
2968
3003
  topK,
@@ -2971,19 +3006,19 @@ function prepareCallSettings({
2971
3006
  stopSequences,
2972
3007
  seed
2973
3008
  }) {
2974
- if (maxTokens != null) {
2975
- if (!Number.isInteger(maxTokens)) {
3009
+ if (maxOutputTokens != null) {
3010
+ if (!Number.isInteger(maxOutputTokens)) {
2976
3011
  throw new InvalidArgumentError({
2977
- parameter: "maxTokens",
2978
- value: maxTokens,
2979
- message: "maxTokens must be an integer"
3012
+ parameter: "maxOutputTokens",
3013
+ value: maxOutputTokens,
3014
+ message: "maxOutputTokens must be an integer"
2980
3015
  });
2981
3016
  }
2982
- if (maxTokens < 1) {
3017
+ if (maxOutputTokens < 1) {
2983
3018
  throw new InvalidArgumentError({
2984
- parameter: "maxTokens",
2985
- value: maxTokens,
2986
- message: "maxTokens must be >= 1"
3019
+ parameter: "maxOutputTokens",
3020
+ value: maxOutputTokens,
3021
+ message: "maxOutputTokens must be >= 1"
2987
3022
  });
2988
3023
  }
2989
3024
  }
@@ -3042,7 +3077,7 @@ function prepareCallSettings({
3042
3077
  }
3043
3078
  }
3044
3079
  return {
3045
- maxTokens,
3080
+ maxOutputTokens,
3046
3081
  // TODO v5 remove default 0 for temperature
3047
3082
  temperature: temperature != null ? temperature : 0,
3048
3083
  topP,
@@ -3137,12 +3172,12 @@ function attachmentsToParts(attachments) {
3137
3172
  }
3138
3173
 
3139
3174
  // core/prompt/message-conversion-error.ts
3140
- import { AISDKError as AISDKError8 } from "@ai-sdk/provider";
3175
+ import { AISDKError as AISDKError9 } from "@ai-sdk/provider";
3141
3176
  var name8 = "AI_MessageConversionError";
3142
3177
  var marker8 = `vercel.ai.error.${name8}`;
3143
3178
  var symbol8 = Symbol.for(marker8);
3144
3179
  var _a8;
3145
- var MessageConversionError = class extends AISDKError8 {
3180
+ var MessageConversionError = class extends AISDKError9 {
3146
3181
  constructor({
3147
3182
  originalMessage,
3148
3183
  message
@@ -3152,7 +3187,7 @@ var MessageConversionError = class extends AISDKError8 {
3152
3187
  this.originalMessage = originalMessage;
3153
3188
  }
3154
3189
  static isInstance(error) {
3155
- return AISDKError8.hasMarker(error, marker8);
3190
+ return AISDKError9.hasMarker(error, marker8);
3156
3191
  }
3157
3192
  };
3158
3193
  _a8 = symbol8;
@@ -3426,7 +3461,7 @@ function detectSingleMessageCharacteristics(message) {
3426
3461
  "experimental_attachments" in message)) {
3427
3462
  return "has-ui-specific-parts";
3428
3463
  } else if (typeof message === "object" && message !== null && "content" in message && (Array.isArray(message.content) || // Core messages can have array content
3429
- "experimental_providerMetadata" in message || "providerOptions" in message)) {
3464
+ "providerOptions" in message)) {
3430
3465
  return "has-core-specific-parts";
3431
3466
  } else if (typeof message === "object" && message !== null && "role" in message && "content" in message && typeof message.content === "string" && ["system", "user", "assistant", "tool"].includes(message.role)) {
3432
3467
  return "message";
@@ -3480,16 +3515,14 @@ var toolResultContentSchema = z4.array(
3480
3515
  var textPartSchema = z5.object({
3481
3516
  type: z5.literal("text"),
3482
3517
  text: z5.string(),
3483
- providerOptions: providerMetadataSchema.optional(),
3484
- experimental_providerMetadata: providerMetadataSchema.optional()
3518
+ providerOptions: providerMetadataSchema.optional()
3485
3519
  });
3486
3520
  var imagePartSchema = z5.object({
3487
3521
  type: z5.literal("image"),
3488
3522
  image: z5.union([dataContentSchema, z5.instanceof(URL)]),
3489
3523
  mediaType: z5.string().optional(),
3490
3524
  mimeType: z5.string().optional(),
3491
- providerOptions: providerMetadataSchema.optional(),
3492
- experimental_providerMetadata: providerMetadataSchema.optional()
3525
+ providerOptions: providerMetadataSchema.optional()
3493
3526
  });
3494
3527
  var filePartSchema = z5.object({
3495
3528
  type: z5.literal("file"),
@@ -3497,28 +3530,24 @@ var filePartSchema = z5.object({
3497
3530
  filename: z5.string().optional(),
3498
3531
  mediaType: z5.string(),
3499
3532
  mimeType: z5.string().optional(),
3500
- providerOptions: providerMetadataSchema.optional(),
3501
- experimental_providerMetadata: providerMetadataSchema.optional()
3533
+ providerOptions: providerMetadataSchema.optional()
3502
3534
  });
3503
3535
  var reasoningPartSchema = z5.object({
3504
3536
  type: z5.literal("reasoning"),
3505
3537
  text: z5.string(),
3506
- providerOptions: providerMetadataSchema.optional(),
3507
- experimental_providerMetadata: providerMetadataSchema.optional()
3538
+ providerOptions: providerMetadataSchema.optional()
3508
3539
  });
3509
3540
  var redactedReasoningPartSchema = z5.object({
3510
3541
  type: z5.literal("redacted-reasoning"),
3511
3542
  data: z5.string(),
3512
- providerOptions: providerMetadataSchema.optional(),
3513
- experimental_providerMetadata: providerMetadataSchema.optional()
3543
+ providerOptions: providerMetadataSchema.optional()
3514
3544
  });
3515
3545
  var toolCallPartSchema = z5.object({
3516
3546
  type: z5.literal("tool-call"),
3517
3547
  toolCallId: z5.string(),
3518
3548
  toolName: z5.string(),
3519
3549
  args: z5.unknown(),
3520
- providerOptions: providerMetadataSchema.optional(),
3521
- experimental_providerMetadata: providerMetadataSchema.optional()
3550
+ providerOptions: providerMetadataSchema.optional()
3522
3551
  });
3523
3552
  var toolResultPartSchema = z5.object({
3524
3553
  type: z5.literal("tool-result"),
@@ -3527,16 +3556,14 @@ var toolResultPartSchema = z5.object({
3527
3556
  result: z5.unknown(),
3528
3557
  content: toolResultContentSchema.optional(),
3529
3558
  isError: z5.boolean().optional(),
3530
- providerOptions: providerMetadataSchema.optional(),
3531
- experimental_providerMetadata: providerMetadataSchema.optional()
3559
+ providerOptions: providerMetadataSchema.optional()
3532
3560
  });
3533
3561
 
3534
3562
  // core/prompt/message.ts
3535
3563
  var coreSystemMessageSchema = z6.object({
3536
3564
  role: z6.literal("system"),
3537
3565
  content: z6.string(),
3538
- providerOptions: providerMetadataSchema.optional(),
3539
- experimental_providerMetadata: providerMetadataSchema.optional()
3566
+ providerOptions: providerMetadataSchema.optional()
3540
3567
  });
3541
3568
  var coreUserMessageSchema = z6.object({
3542
3569
  role: z6.literal("user"),
@@ -3544,8 +3571,7 @@ var coreUserMessageSchema = z6.object({
3544
3571
  z6.string(),
3545
3572
  z6.array(z6.union([textPartSchema, imagePartSchema, filePartSchema]))
3546
3573
  ]),
3547
- providerOptions: providerMetadataSchema.optional(),
3548
- experimental_providerMetadata: providerMetadataSchema.optional()
3574
+ providerOptions: providerMetadataSchema.optional()
3549
3575
  });
3550
3576
  var coreAssistantMessageSchema = z6.object({
3551
3577
  role: z6.literal("assistant"),
@@ -3561,14 +3587,12 @@ var coreAssistantMessageSchema = z6.object({
3561
3587
  ])
3562
3588
  )
3563
3589
  ]),
3564
- providerOptions: providerMetadataSchema.optional(),
3565
- experimental_providerMetadata: providerMetadataSchema.optional()
3590
+ providerOptions: providerMetadataSchema.optional()
3566
3591
  });
3567
3592
  var coreToolMessageSchema = z6.object({
3568
3593
  role: z6.literal("tool"),
3569
3594
  content: z6.array(toolResultPartSchema),
3570
- providerOptions: providerMetadataSchema.optional(),
3571
- experimental_providerMetadata: providerMetadataSchema.optional()
3595
+ providerOptions: providerMetadataSchema.optional()
3572
3596
  });
3573
3597
  var coreMessageSchema = z6.union([
3574
3598
  coreSystemMessageSchema,
@@ -3657,13 +3681,13 @@ function standardizePrompt({
3657
3681
 
3658
3682
  // core/types/usage.ts
3659
3683
  function calculateLanguageModelUsage2({
3660
- promptTokens,
3661
- completionTokens
3684
+ inputTokens,
3685
+ outputTokens
3662
3686
  }) {
3663
3687
  return {
3664
- promptTokens,
3665
- completionTokens,
3666
- totalTokens: promptTokens + completionTokens
3688
+ promptTokens: inputTokens != null ? inputTokens : NaN,
3689
+ completionTokens: outputTokens != null ? outputTokens : NaN,
3690
+ totalTokens: (inputTokens != null ? inputTokens : 0) + (outputTokens != null ? outputTokens : 0)
3667
3691
  };
3668
3692
  }
3669
3693
  function addLanguageModelUsage(usage1, usage2) {
@@ -4088,8 +4112,7 @@ async function generateObject({
4088
4112
  headers,
4089
4113
  experimental_repairText: repairText,
4090
4114
  experimental_telemetry: telemetry,
4091
- experimental_providerMetadata,
4092
- providerOptions = experimental_providerMetadata,
4115
+ providerOptions,
4093
4116
  _internal: {
4094
4117
  generateId: generateId3 = originalGenerateId,
4095
4118
  currentDate = () => /* @__PURE__ */ new Date()
@@ -4143,7 +4166,7 @@ async function generateObject({
4143
4166
  }),
4144
4167
  tracer,
4145
4168
  fn: async (span) => {
4146
- var _a17, _b, _c, _d;
4169
+ var _a17, _b, _c, _d, _e;
4147
4170
  if (mode === "auto" || mode == null) {
4148
4171
  mode = model.defaultObjectGenerationMode;
4149
4172
  }
@@ -4196,7 +4219,7 @@ async function generateObject({
4196
4219
  "gen_ai.system": model.provider,
4197
4220
  "gen_ai.request.model": model.modelId,
4198
4221
  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
4199
- "gen_ai.request.max_tokens": settings.maxTokens,
4222
+ "gen_ai.request.max_tokens": settings.maxOutputTokens,
4200
4223
  "gen_ai.request.presence_penalty": settings.presencePenalty,
4201
4224
  "gen_ai.request.temperature": settings.temperature,
4202
4225
  "gen_ai.request.top_k": settings.topK,
@@ -4205,7 +4228,7 @@ async function generateObject({
4205
4228
  }),
4206
4229
  tracer,
4207
4230
  fn: async (span2) => {
4208
- var _a18, _b2, _c2, _d2, _e, _f, _g, _h;
4231
+ var _a18, _b2, _c2, _d2, _e2, _f, _g, _h;
4209
4232
  const result2 = await model.doGenerate({
4210
4233
  responseFormat: {
4211
4234
  type: "json",
@@ -4223,7 +4246,7 @@ async function generateObject({
4223
4246
  const responseData = {
4224
4247
  id: (_b2 = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b2 : generateId3(),
4225
4248
  timestamp: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
4226
- modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
4249
+ modelId: (_f = (_e2 = result2.response) == null ? void 0 : _e2.modelId) != null ? _f : model.modelId,
4227
4250
  headers: (_g = result2.response) == null ? void 0 : _g.headers,
4228
4251
  body: (_h = result2.response) == null ? void 0 : _h.body
4229
4252
  };
@@ -4240,18 +4263,22 @@ async function generateObject({
4240
4263
  telemetry,
4241
4264
  attributes: {
4242
4265
  "ai.response.finishReason": result2.finishReason,
4243
- "ai.response.object": { output: () => result2.text },
4266
+ "ai.response.object": { output: () => {
4267
+ var _a19;
4268
+ return (_a19 = result2.text) == null ? void 0 : _a19.text;
4269
+ } },
4244
4270
  "ai.response.id": responseData.id,
4245
4271
  "ai.response.model": responseData.modelId,
4246
4272
  "ai.response.timestamp": responseData.timestamp.toISOString(),
4247
- "ai.usage.promptTokens": result2.usage.promptTokens,
4248
- "ai.usage.completionTokens": result2.usage.completionTokens,
4273
+ // TODO rename telemetry attributes to inputTokens and outputTokens
4274
+ "ai.usage.promptTokens": result2.usage.inputTokens,
4275
+ "ai.usage.completionTokens": result2.usage.outputTokens,
4249
4276
  // standardized gen-ai llm span attributes:
4250
4277
  "gen_ai.response.finish_reasons": [result2.finishReason],
4251
4278
  "gen_ai.response.id": responseData.id,
4252
4279
  "gen_ai.response.model": responseData.modelId,
4253
- "gen_ai.usage.prompt_tokens": result2.usage.promptTokens,
4254
- "gen_ai.usage.completion_tokens": result2.usage.completionTokens
4280
+ "gen_ai.usage.input_tokens": result2.usage.inputTokens,
4281
+ "gen_ai.usage.output_tokens": result2.usage.outputTokens
4255
4282
  }
4256
4283
  })
4257
4284
  );
@@ -4259,13 +4286,13 @@ async function generateObject({
4259
4286
  }
4260
4287
  })
4261
4288
  );
4262
- result = generateResult.objectText;
4289
+ result = (_b = generateResult.objectText) == null ? void 0 : _b.text;
4263
4290
  finishReason = generateResult.finishReason;
4264
4291
  usage = generateResult.usage;
4265
4292
  warnings = generateResult.warnings;
4266
4293
  logprobs = generateResult.logprobs;
4267
4294
  resultProviderMetadata = generateResult.providerMetadata;
4268
- request = (_b = generateResult.request) != null ? _b : {};
4295
+ request = (_c = generateResult.request) != null ? _c : {};
4269
4296
  response = generateResult.responseData;
4270
4297
  break;
4271
4298
  }
@@ -4277,7 +4304,7 @@ async function generateObject({
4277
4304
  const promptMessages = await convertToLanguageModelPrompt({
4278
4305
  prompt: standardizedPrompt,
4279
4306
  modelSupportsImageUrls: model.supportsImageUrls,
4280
- modelSupportsUrl: (_c = model.supportsUrl) == null ? void 0 : _c.bind(model)
4307
+ modelSupportsUrl: (_d = model.supportsUrl) == null ? void 0 : _d.bind(model)
4281
4308
  // support 'this' context,
4282
4309
  });
4283
4310
  const inputFormat = standardizedPrompt.type;
@@ -4303,7 +4330,7 @@ async function generateObject({
4303
4330
  "gen_ai.system": model.provider,
4304
4331
  "gen_ai.request.model": model.modelId,
4305
4332
  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
4306
- "gen_ai.request.max_tokens": settings.maxTokens,
4333
+ "gen_ai.request.max_tokens": settings.maxOutputTokens,
4307
4334
  "gen_ai.request.presence_penalty": settings.presencePenalty,
4308
4335
  "gen_ai.request.temperature": settings.temperature,
4309
4336
  "gen_ai.request.top_k": settings.topK,
@@ -4312,7 +4339,7 @@ async function generateObject({
4312
4339
  }),
4313
4340
  tracer,
4314
4341
  fn: async (span2) => {
4315
- var _a18, _b2, _c2, _d2, _e, _f, _g, _h, _i, _j;
4342
+ var _a18, _b2, _c2, _d2, _e2, _f, _g, _h, _i, _j;
4316
4343
  const result2 = await model.doGenerate({
4317
4344
  tools: [
4318
4345
  {
@@ -4333,7 +4360,7 @@ async function generateObject({
4333
4360
  const objectText = (_b2 = (_a18 = result2.toolCalls) == null ? void 0 : _a18[0]) == null ? void 0 : _b2.args;
4334
4361
  const responseData = {
4335
4362
  id: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.id) != null ? _d2 : generateId3(),
4336
- timestamp: (_f = (_e = result2.response) == null ? void 0 : _e.timestamp) != null ? _f : currentDate(),
4363
+ timestamp: (_f = (_e2 = result2.response) == null ? void 0 : _e2.timestamp) != null ? _f : currentDate(),
4337
4364
  modelId: (_h = (_g = result2.response) == null ? void 0 : _g.modelId) != null ? _h : model.modelId,
4338
4365
  headers: (_i = result2.response) == null ? void 0 : _i.headers,
4339
4366
  body: (_j = result2.response) == null ? void 0 : _j.body
@@ -4355,14 +4382,15 @@ async function generateObject({
4355
4382
  "ai.response.id": responseData.id,
4356
4383
  "ai.response.model": responseData.modelId,
4357
4384
  "ai.response.timestamp": responseData.timestamp.toISOString(),
4358
- "ai.usage.promptTokens": result2.usage.promptTokens,
4359
- "ai.usage.completionTokens": result2.usage.completionTokens,
4385
+ // TODO rename telemetry attributes to inputTokens and outputTokens
4386
+ "ai.usage.promptTokens": result2.usage.inputTokens,
4387
+ "ai.usage.completionTokens": result2.usage.outputTokens,
4360
4388
  // standardized gen-ai llm span attributes:
4361
4389
  "gen_ai.response.finish_reasons": [result2.finishReason],
4362
4390
  "gen_ai.response.id": responseData.id,
4363
4391
  "gen_ai.response.model": responseData.modelId,
4364
- "gen_ai.usage.input_tokens": result2.usage.promptTokens,
4365
- "gen_ai.usage.output_tokens": result2.usage.completionTokens
4392
+ "gen_ai.usage.input_tokens": result2.usage.inputTokens,
4393
+ "gen_ai.usage.output_tokens": result2.usage.outputTokens
4366
4394
  }
4367
4395
  })
4368
4396
  );
@@ -4376,7 +4404,7 @@ async function generateObject({
4376
4404
  warnings = generateResult.warnings;
4377
4405
  logprobs = generateResult.logprobs;
4378
4406
  resultProviderMetadata = generateResult.providerMetadata;
4379
- request = (_d = generateResult.request) != null ? _d : {};
4407
+ request = (_e = generateResult.request) != null ? _e : {};
4380
4408
  response = generateResult.responseData;
4381
4409
  break;
4382
4410
  }
@@ -4447,8 +4475,9 @@ async function generateObject({
4447
4475
  "ai.response.object": {
4448
4476
  output: () => JSON.stringify(object2)
4449
4477
  },
4450
- "ai.usage.promptTokens": usage.promptTokens,
4451
- "ai.usage.completionTokens": usage.completionTokens
4478
+ // TODO rename telemetry attributes to inputTokens and outputTokens
4479
+ "ai.usage.promptTokens": usage.inputTokens,
4480
+ "ai.usage.completionTokens": usage.outputTokens
4452
4481
  }
4453
4482
  })
4454
4483
  );
@@ -4472,7 +4501,6 @@ var DefaultGenerateObjectResult = class {
4472
4501
  this.usage = options.usage;
4473
4502
  this.warnings = options.warnings;
4474
4503
  this.providerMetadata = options.providerMetadata;
4475
- this.experimental_providerMetadata = options.providerMetadata;
4476
4504
  this.response = options.response;
4477
4505
  this.request = options.request;
4478
4506
  this.logprobs = options.logprobs;
@@ -4648,8 +4676,7 @@ function streamObject({
4648
4676
  abortSignal,
4649
4677
  headers,
4650
4678
  experimental_telemetry: telemetry,
4651
- experimental_providerMetadata,
4652
- providerOptions = experimental_providerMetadata,
4679
+ providerOptions,
4653
4680
  onError,
4654
4681
  onFinish,
4655
4682
  _internal: {
@@ -4806,8 +4833,8 @@ var DefaultStreamObjectResult = class {
4806
4833
  transformer = {
4807
4834
  transform: (chunk, controller) => {
4808
4835
  switch (chunk.type) {
4809
- case "text-delta":
4810
- controller.enqueue(chunk.textDelta);
4836
+ case "text":
4837
+ controller.enqueue(chunk.text);
4811
4838
  break;
4812
4839
  case "response-metadata":
4813
4840
  case "finish":
@@ -4898,7 +4925,7 @@ var DefaultStreamObjectResult = class {
4898
4925
  "gen_ai.system": model.provider,
4899
4926
  "gen_ai.request.model": model.modelId,
4900
4927
  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
4901
- "gen_ai.request.max_tokens": settings.maxTokens,
4928
+ "gen_ai.request.max_tokens": settings.maxOutputTokens,
4902
4929
  "gen_ai.request.presence_penalty": settings.presencePenalty,
4903
4930
  "gen_ai.request.temperature": settings.temperature,
4904
4931
  "gen_ai.request.top_k": settings.topK,
@@ -5086,8 +5113,7 @@ var DefaultStreamObjectResult = class {
5086
5113
  headers: response == null ? void 0 : response.headers
5087
5114
  },
5088
5115
  warnings,
5089
- providerMetadata,
5090
- experimental_providerMetadata: providerMetadata
5116
+ providerMetadata
5091
5117
  }));
5092
5118
  } catch (error2) {
5093
5119
  controller.enqueue({ type: "error", error: error2 });
@@ -5119,9 +5145,6 @@ var DefaultStreamObjectResult = class {
5119
5145
  get usage() {
5120
5146
  return this.usagePromise.value;
5121
5147
  }
5122
- get experimental_providerMetadata() {
5123
- return this.providerMetadataPromise.value;
5124
- }
5125
5148
  get providerMetadata() {
5126
5149
  return this.providerMetadataPromise.value;
5127
5150
  }
@@ -5212,30 +5235,30 @@ var DefaultStreamObjectResult = class {
5212
5235
  import { createIdGenerator as createIdGenerator3 } from "@ai-sdk/provider-utils";
5213
5236
 
5214
5237
  // errors/no-output-specified-error.ts
5215
- import { AISDKError as AISDKError9 } from "@ai-sdk/provider";
5238
+ import { AISDKError as AISDKError10 } from "@ai-sdk/provider";
5216
5239
  var name9 = "AI_NoOutputSpecifiedError";
5217
5240
  var marker9 = `vercel.ai.error.${name9}`;
5218
5241
  var symbol9 = Symbol.for(marker9);
5219
5242
  var _a9;
5220
- var NoOutputSpecifiedError = class extends AISDKError9 {
5243
+ var NoOutputSpecifiedError = class extends AISDKError10 {
5221
5244
  // used in isInstance
5222
5245
  constructor({ message = "No output specified." } = {}) {
5223
5246
  super({ name: name9, message });
5224
5247
  this[_a9] = true;
5225
5248
  }
5226
5249
  static isInstance(error) {
5227
- return AISDKError9.hasMarker(error, marker9);
5250
+ return AISDKError10.hasMarker(error, marker9);
5228
5251
  }
5229
5252
  };
5230
5253
  _a9 = symbol9;
5231
5254
 
5232
5255
  // errors/tool-execution-error.ts
5233
- import { AISDKError as AISDKError10, getErrorMessage as getErrorMessage2 } from "@ai-sdk/provider";
5256
+ import { AISDKError as AISDKError11, getErrorMessage as getErrorMessage2 } from "@ai-sdk/provider";
5234
5257
  var name10 = "AI_ToolExecutionError";
5235
5258
  var marker10 = `vercel.ai.error.${name10}`;
5236
5259
  var symbol10 = Symbol.for(marker10);
5237
5260
  var _a10;
5238
- var ToolExecutionError = class extends AISDKError10 {
5261
+ var ToolExecutionError = class extends AISDKError11 {
5239
5262
  constructor({
5240
5263
  toolArgs,
5241
5264
  toolName,
@@ -5250,7 +5273,7 @@ var ToolExecutionError = class extends AISDKError10 {
5250
5273
  this.toolCallId = toolCallId;
5251
5274
  }
5252
5275
  static isInstance(error) {
5253
- return AISDKError10.hasMarker(error, marker10);
5276
+ return AISDKError11.hasMarker(error, marker10);
5254
5277
  }
5255
5278
  };
5256
5279
  _a10 = symbol10;
@@ -5321,12 +5344,12 @@ function removeTextAfterLastWhitespace(text2) {
5321
5344
  import { safeParseJSON as safeParseJSON3, safeValidateTypes as safeValidateTypes3 } from "@ai-sdk/provider-utils";
5322
5345
 
5323
5346
  // errors/invalid-tool-arguments-error.ts
5324
- import { AISDKError as AISDKError11, getErrorMessage as getErrorMessage3 } from "@ai-sdk/provider";
5347
+ import { AISDKError as AISDKError12, getErrorMessage as getErrorMessage3 } from "@ai-sdk/provider";
5325
5348
  var name11 = "AI_InvalidToolArgumentsError";
5326
5349
  var marker11 = `vercel.ai.error.${name11}`;
5327
5350
  var symbol11 = Symbol.for(marker11);
5328
5351
  var _a11;
5329
- var InvalidToolArgumentsError = class extends AISDKError11 {
5352
+ var InvalidToolArgumentsError = class extends AISDKError12 {
5330
5353
  constructor({
5331
5354
  toolArgs,
5332
5355
  toolName,
@@ -5341,18 +5364,18 @@ var InvalidToolArgumentsError = class extends AISDKError11 {
5341
5364
  this.toolName = toolName;
5342
5365
  }
5343
5366
  static isInstance(error) {
5344
- return AISDKError11.hasMarker(error, marker11);
5367
+ return AISDKError12.hasMarker(error, marker11);
5345
5368
  }
5346
5369
  };
5347
5370
  _a11 = symbol11;
5348
5371
 
5349
5372
  // errors/no-such-tool-error.ts
5350
- import { AISDKError as AISDKError12 } from "@ai-sdk/provider";
5373
+ import { AISDKError as AISDKError13 } from "@ai-sdk/provider";
5351
5374
  var name12 = "AI_NoSuchToolError";
5352
5375
  var marker12 = `vercel.ai.error.${name12}`;
5353
5376
  var symbol12 = Symbol.for(marker12);
5354
5377
  var _a12;
5355
- var NoSuchToolError = class extends AISDKError12 {
5378
+ var NoSuchToolError = class extends AISDKError13 {
5356
5379
  constructor({
5357
5380
  toolName,
5358
5381
  availableTools = void 0,
@@ -5364,18 +5387,18 @@ var NoSuchToolError = class extends AISDKError12 {
5364
5387
  this.availableTools = availableTools;
5365
5388
  }
5366
5389
  static isInstance(error) {
5367
- return AISDKError12.hasMarker(error, marker12);
5390
+ return AISDKError13.hasMarker(error, marker12);
5368
5391
  }
5369
5392
  };
5370
5393
  _a12 = symbol12;
5371
5394
 
5372
5395
  // errors/tool-call-repair-error.ts
5373
- import { AISDKError as AISDKError13, getErrorMessage as getErrorMessage4 } from "@ai-sdk/provider";
5396
+ import { AISDKError as AISDKError14, getErrorMessage as getErrorMessage4 } from "@ai-sdk/provider";
5374
5397
  var name13 = "AI_ToolCallRepairError";
5375
5398
  var marker13 = `vercel.ai.error.${name13}`;
5376
5399
  var symbol13 = Symbol.for(marker13);
5377
5400
  var _a13;
5378
- var ToolCallRepairError = class extends AISDKError13 {
5401
+ var ToolCallRepairError = class extends AISDKError14 {
5379
5402
  constructor({
5380
5403
  cause,
5381
5404
  originalError,
@@ -5386,7 +5409,7 @@ var ToolCallRepairError = class extends AISDKError13 {
5386
5409
  this.originalError = originalError;
5387
5410
  }
5388
5411
  static isInstance(error) {
5389
- return AISDKError13.hasMarker(error, marker13);
5412
+ return AISDKError14.hasMarker(error, marker13);
5390
5413
  }
5391
5414
  };
5392
5415
  _a13 = symbol13;
@@ -5413,7 +5436,10 @@ async function parseToolCall({
5413
5436
  repairedToolCall = await repairToolCall({
5414
5437
  toolCall,
5415
5438
  tools,
5416
- parameterSchema: ({ toolName }) => asSchema(tools[toolName].parameters).jsonSchema,
5439
+ parameterSchema: ({ toolName }) => {
5440
+ const { parameters } = tools[toolName];
5441
+ return asSchema(parameters).jsonSchema;
5442
+ },
5417
5443
  system,
5418
5444
  messages,
5419
5445
  error
@@ -5455,7 +5481,7 @@ async function doParseToolCall({
5455
5481
  type: "tool-call",
5456
5482
  toolCallId: toolCall.toolCallId,
5457
5483
  toolName,
5458
- args: parseResult.value
5484
+ args: parseResult == null ? void 0 : parseResult.value
5459
5485
  };
5460
5486
  }
5461
5487
 
@@ -5477,23 +5503,36 @@ function toResponseMessages({
5477
5503
  generateMessageId
5478
5504
  }) {
5479
5505
  const responseMessages = [];
5480
- responseMessages.push({
5481
- role: "assistant",
5482
- content: [
5506
+ const content = [];
5507
+ if (reasoning.length > 0) {
5508
+ content.push(
5483
5509
  ...reasoning.map(
5484
5510
  (part) => part.type === "text" ? { ...part, type: "reasoning" } : { ...part, type: "redacted-reasoning" }
5485
- ),
5486
- // TODO language model v2: switch to order response content (instead of type-based ordering)
5511
+ )
5512
+ );
5513
+ }
5514
+ if (files.length > 0) {
5515
+ content.push(
5487
5516
  ...files.map((file) => ({
5488
5517
  type: "file",
5489
5518
  data: file.base64,
5490
5519
  mediaType: file.mediaType
5491
- })),
5492
- { type: "text", text: text2 },
5493
- ...toolCalls
5494
- ],
5495
- id: messageId
5496
- });
5520
+ }))
5521
+ );
5522
+ }
5523
+ if (text2.length > 0) {
5524
+ content.push({ type: "text", text: text2 });
5525
+ }
5526
+ if (toolCalls.length > 0) {
5527
+ content.push(...toolCalls);
5528
+ }
5529
+ if (content.length > 0) {
5530
+ responseMessages.push({
5531
+ role: "assistant",
5532
+ content,
5533
+ id: messageId
5534
+ });
5535
+ }
5497
5536
  if (toolResults.length > 0) {
5498
5537
  responseMessages.push({
5499
5538
  role: "tool",
@@ -5544,8 +5583,7 @@ async function generateText({
5544
5583
  experimental_output: output,
5545
5584
  experimental_continueSteps: continueSteps = false,
5546
5585
  experimental_telemetry: telemetry,
5547
- experimental_providerMetadata,
5548
- providerOptions = experimental_providerMetadata,
5586
+ providerOptions,
5549
5587
  experimental_activeTools: activeTools,
5550
5588
  experimental_repairToolCall: repairToolCall,
5551
5589
  _internal: {
@@ -5598,7 +5636,7 @@ async function generateText({
5598
5636
  }),
5599
5637
  tracer,
5600
5638
  fn: async (span) => {
5601
- var _a18, _b, _c, _d, _e, _f, _g;
5639
+ var _a18, _b, _c, _d, _e, _f, _g, _h;
5602
5640
  const toolsAndToolChoice = {
5603
5641
  ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
5604
5642
  };
@@ -5663,7 +5701,7 @@ async function generateText({
5663
5701
  "gen_ai.system": model.provider,
5664
5702
  "gen_ai.request.model": model.modelId,
5665
5703
  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
5666
- "gen_ai.request.max_tokens": settings.maxTokens,
5704
+ "gen_ai.request.max_tokens": settings.maxOutputTokens,
5667
5705
  "gen_ai.request.presence_penalty": settings.presencePenalty,
5668
5706
  "gen_ai.request.stop_sequences": settings.stopSequences,
5669
5707
  "gen_ai.request.temperature": settings.temperature,
@@ -5673,7 +5711,7 @@ async function generateText({
5673
5711
  }),
5674
5712
  tracer,
5675
5713
  fn: async (span2) => {
5676
- var _a19, _b2, _c2, _d2, _e2, _f2, _g2, _h;
5714
+ var _a19, _b2, _c2, _d2, _e2, _f2, _g2, _h2;
5677
5715
  const result = await model.doGenerate({
5678
5716
  ...callSettings,
5679
5717
  ...toolsAndToolChoice,
@@ -5689,7 +5727,7 @@ async function generateText({
5689
5727
  timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
5690
5728
  modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : model.modelId,
5691
5729
  headers: (_g2 = result.response) == null ? void 0 : _g2.headers,
5692
- body: (_h = result.response) == null ? void 0 : _h.body
5730
+ body: (_h2 = result.response) == null ? void 0 : _h2.body
5693
5731
  };
5694
5732
  span2.setAttributes(
5695
5733
  selectTelemetryAttributes({
@@ -5697,22 +5735,36 @@ async function generateText({
5697
5735
  attributes: {
5698
5736
  "ai.response.finishReason": result.finishReason,
5699
5737
  "ai.response.text": {
5700
- output: () => result.text
5738
+ output: () => {
5739
+ var _a20;
5740
+ return (_a20 = result.text) == null ? void 0 : _a20.text;
5741
+ }
5701
5742
  },
5702
5743
  "ai.response.toolCalls": {
5703
- output: () => JSON.stringify(result.toolCalls)
5744
+ output: () => {
5745
+ var _a20;
5746
+ return JSON.stringify(
5747
+ (_a20 = result.toolCalls) == null ? void 0 : _a20.map((toolCall) => ({
5748
+ toolCallType: toolCall.toolCallType,
5749
+ toolCallId: toolCall.toolCallId,
5750
+ toolName: toolCall.toolName,
5751
+ args: toolCall.args
5752
+ }))
5753
+ );
5754
+ }
5704
5755
  },
5705
5756
  "ai.response.id": responseData.id,
5706
5757
  "ai.response.model": responseData.modelId,
5707
5758
  "ai.response.timestamp": responseData.timestamp.toISOString(),
5708
- "ai.usage.promptTokens": result.usage.promptTokens,
5709
- "ai.usage.completionTokens": result.usage.completionTokens,
5759
+ // TODO rename telemetry attributes to inputTokens and outputTokens
5760
+ "ai.usage.promptTokens": result.usage.inputTokens,
5761
+ "ai.usage.completionTokens": result.usage.outputTokens,
5710
5762
  // standardized gen-ai llm span attributes:
5711
5763
  "gen_ai.response.finish_reasons": [result.finishReason],
5712
5764
  "gen_ai.response.id": responseData.id,
5713
5765
  "gen_ai.response.model": responseData.modelId,
5714
- "gen_ai.usage.input_tokens": result.usage.promptTokens,
5715
- "gen_ai.usage.output_tokens": result.usage.completionTokens
5766
+ "gen_ai.usage.input_tokens": result.usage.inputTokens,
5767
+ "gen_ai.usage.output_tokens": result.usage.outputTokens
5716
5768
  }
5717
5769
  })
5718
5770
  );
@@ -5756,7 +5808,7 @@ async function generateText({
5756
5808
  nextStepType = "tool-result";
5757
5809
  }
5758
5810
  }
5759
- const originalText = (_c = currentModelResponse.text) != null ? _c : "";
5811
+ const originalText = (_d = (_c = currentModelResponse.text) == null ? void 0 : _c.text) != null ? _d : "";
5760
5812
  const stepTextLeadingWhitespaceTrimmed = stepType === "continue" && // only for continue steps
5761
5813
  text2.trimEnd() !== text2 ? originalText.trimStart() : originalText;
5762
5814
  const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace(stepTextLeadingWhitespaceTrimmed) : stepTextLeadingWhitespaceTrimmed;
@@ -5764,7 +5816,7 @@ async function generateText({
5764
5816
  currentReasoningDetails = asReasoningDetails(
5765
5817
  currentModelResponse.reasoning
5766
5818
  );
5767
- sources.push(...(_d = currentModelResponse.sources) != null ? _d : []);
5819
+ sources.push(...(_e = currentModelResponse.sources) != null ? _e : []);
5768
5820
  if (stepType === "continue") {
5769
5821
  const lastMessage = responseMessages[responseMessages.length - 1];
5770
5822
  if (typeof lastMessage.content === "string") {
@@ -5796,21 +5848,20 @@ async function generateText({
5796
5848
  reasoning: asReasoningText(currentReasoningDetails),
5797
5849
  reasoningDetails: currentReasoningDetails,
5798
5850
  files: asFiles(currentModelResponse.files),
5799
- sources: (_e = currentModelResponse.sources) != null ? _e : [],
5851
+ sources: (_f = currentModelResponse.sources) != null ? _f : [],
5800
5852
  toolCalls: currentToolCalls,
5801
5853
  toolResults: currentToolResults,
5802
5854
  finishReason: currentModelResponse.finishReason,
5803
5855
  usage: currentUsage,
5804
5856
  warnings: currentModelResponse.warnings,
5805
5857
  logprobs: currentModelResponse.logprobs,
5806
- request: (_f = currentModelResponse.request) != null ? _f : {},
5858
+ request: (_g = currentModelResponse.request) != null ? _g : {},
5807
5859
  response: {
5808
5860
  ...currentModelResponse.response,
5809
5861
  // deep clone msgs to avoid mutating past messages in multi-step:
5810
5862
  messages: structuredClone(responseMessages)
5811
5863
  },
5812
5864
  providerMetadata: currentModelResponse.providerMetadata,
5813
- experimental_providerMetadata: currentModelResponse.providerMetadata,
5814
5865
  isContinued: nextStepType === "continue"
5815
5866
  };
5816
5867
  steps.push(currentStepResult);
@@ -5823,13 +5874,27 @@ async function generateText({
5823
5874
  attributes: {
5824
5875
  "ai.response.finishReason": currentModelResponse.finishReason,
5825
5876
  "ai.response.text": {
5826
- output: () => currentModelResponse.text
5877
+ output: () => {
5878
+ var _a19;
5879
+ return (_a19 = currentModelResponse.text) == null ? void 0 : _a19.text;
5880
+ }
5827
5881
  },
5828
5882
  "ai.response.toolCalls": {
5829
- output: () => JSON.stringify(currentModelResponse.toolCalls)
5883
+ output: () => {
5884
+ var _a19;
5885
+ return JSON.stringify(
5886
+ (_a19 = currentModelResponse.toolCalls) == null ? void 0 : _a19.map((toolCall) => ({
5887
+ toolCallType: toolCall.toolCallType,
5888
+ toolCallId: toolCall.toolCallId,
5889
+ toolName: toolCall.toolName,
5890
+ args: toolCall.args
5891
+ }))
5892
+ );
5893
+ }
5830
5894
  },
5831
- "ai.usage.promptTokens": currentModelResponse.usage.promptTokens,
5832
- "ai.usage.completionTokens": currentModelResponse.usage.completionTokens
5895
+ // TODO rename telemetry attributes to inputTokens and outputTokens
5896
+ "ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
5897
+ "ai.usage.completionTokens": currentModelResponse.usage.outputTokens
5833
5898
  }
5834
5899
  })
5835
5900
  );
@@ -5857,7 +5922,7 @@ async function generateText({
5857
5922
  finishReason: currentModelResponse.finishReason,
5858
5923
  usage,
5859
5924
  warnings: currentModelResponse.warnings,
5860
- request: (_g = currentModelResponse.request) != null ? _g : {},
5925
+ request: (_h = currentModelResponse.request) != null ? _h : {},
5861
5926
  response: {
5862
5927
  ...currentModelResponse.response,
5863
5928
  messages: responseMessages
@@ -5958,7 +6023,6 @@ var DefaultGenerateTextResult = class {
5958
6023
  this.request = options.request;
5959
6024
  this.response = options.response;
5960
6025
  this.steps = options.steps;
5961
- this.experimental_providerMetadata = options.providerMetadata;
5962
6026
  this.providerMetadata = options.providerMetadata;
5963
6027
  this.logprobs = options.logprobs;
5964
6028
  this.outputResolver = options.outputResolver;
@@ -5972,10 +6036,28 @@ function asReasoningDetails(reasoning) {
5972
6036
  if (reasoning == null) {
5973
6037
  return [];
5974
6038
  }
5975
- if (typeof reasoning === "string") {
5976
- return [{ type: "text", text: reasoning }];
6039
+ const result = [];
6040
+ let activeReasoningText;
6041
+ for (const part of reasoning) {
6042
+ if (part.reasoningType === "text") {
6043
+ if (activeReasoningText == null) {
6044
+ activeReasoningText = { type: "text", text: part.text };
6045
+ result.push(activeReasoningText);
6046
+ } else {
6047
+ activeReasoningText.text += part.text;
6048
+ }
6049
+ } else if (part.reasoningType === "signature") {
6050
+ if (activeReasoningText == null) {
6051
+ activeReasoningText = { type: "text", text: "" };
6052
+ result.push(activeReasoningText);
6053
+ }
6054
+ activeReasoningText.signature = part.signature;
6055
+ activeReasoningText = void 0;
6056
+ } else if (part.reasoningType === "redacted") {
6057
+ result.push({ type: "redacted", data: part.data });
6058
+ }
5977
6059
  }
5978
- return reasoning;
6060
+ return result;
5979
6061
  }
5980
6062
  function asFiles(files) {
5981
6063
  var _a17;
@@ -5992,7 +6074,7 @@ import { safeParseJSON as safeParseJSON4, safeValidateTypes as safeValidateTypes
5992
6074
 
5993
6075
  // errors/index.ts
5994
6076
  import {
5995
- AISDKError as AISDKError16,
6077
+ AISDKError as AISDKError17,
5996
6078
  APICallError as APICallError2,
5997
6079
  EmptyResponseBodyError,
5998
6080
  InvalidPromptError as InvalidPromptError2,
@@ -6006,12 +6088,12 @@ import {
  } from "@ai-sdk/provider";

  // errors/invalid-stream-part-error.ts
- import { AISDKError as AISDKError14 } from "@ai-sdk/provider";
+ import { AISDKError as AISDKError15 } from "@ai-sdk/provider";
  var name14 = "AI_InvalidStreamPartError";
  var marker14 = `vercel.ai.error.${name14}`;
  var symbol14 = Symbol.for(marker14);
  var _a14;
- var InvalidStreamPartError = class extends AISDKError14 {
+ var InvalidStreamPartError = class extends AISDKError15 {
  constructor({
  chunk,
  message
@@ -6021,18 +6103,18 @@ var InvalidStreamPartError = class extends AISDKError14 {
  this.chunk = chunk;
  }
  static isInstance(error) {
- return AISDKError14.hasMarker(error, marker14);
+ return AISDKError15.hasMarker(error, marker14);
  }
  };
  _a14 = symbol14;

  // errors/mcp-client-error.ts
- import { AISDKError as AISDKError15 } from "@ai-sdk/provider";
+ import { AISDKError as AISDKError16 } from "@ai-sdk/provider";
  var name15 = "AI_MCPClientError";
  var marker15 = `vercel.ai.error.${name15}`;
  var symbol15 = Symbol.for(marker15);
  var _a15;
- var MCPClientError = class extends AISDKError15 {
+ var MCPClientError = class extends AISDKError16 {
  constructor({
  name: name17 = "MCPClientError",
  message,
@@ -6042,7 +6124,7 @@ var MCPClientError = class extends AISDKError15 {
  this[_a15] = true;
  }
  static isInstance(error) {
- return AISDKError15.hasMarker(error, marker15);
+ return AISDKError16.hasMarker(error, marker15);
  }
  };
  _a15 = symbol15;
@@ -6175,18 +6257,18 @@ function smoothStream({
  let buffer = "";
  return new TransformStream({
  async transform(chunk, controller) {
- if (chunk.type !== "text-delta") {
+ if (chunk.type !== "text") {
  if (buffer.length > 0) {
- controller.enqueue({ type: "text-delta", textDelta: buffer });
+ controller.enqueue({ type: "text", text: buffer });
  buffer = "";
  }
  controller.enqueue(chunk);
  return;
  }
- buffer += chunk.textDelta;
+ buffer += chunk.text;
  let match;
  while ((match = detectChunk(buffer)) != null) {
- controller.enqueue({ type: "text-delta", textDelta: match });
+ controller.enqueue({ type: "text", text: match });
  buffer = buffer.slice(match.length);
  await delay2(delayInMs);
  }
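The smoothStream change above is part of a broader rename in this release: text stream parts switch from { type: "text-delta", textDelta } to { type: "text", text }. A minimal custom transform written against the new chunk shape (shape assumed from the hunks in this diff):

// Uppercases text chunks and passes every other chunk through unchanged.
const upperCaseTextTransform = new TransformStream({
  transform(chunk, controller) {
    controller.enqueue(
      chunk.type === "text" ? { ...chunk, text: chunk.text.toUpperCase() } : chunk
    );
  },
});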
@@ -6196,7 +6278,7 @@ function smoothStream({
  }

  // core/generate-text/stream-text.ts
- import { AISDKError as AISDKError17 } from "@ai-sdk/provider";
+ import { AISDKError as AISDKError18 } from "@ai-sdk/provider";
  import { createIdGenerator as createIdGenerator4 } from "@ai-sdk/provider-utils";

  // util/as-array.ts
@@ -6345,10 +6427,8 @@ function runToolsTransformation({
  async transform(chunk, controller) {
  const chunkType = chunk.type;
  switch (chunkType) {
- case "text-delta":
+ case "text":
  case "reasoning":
- case "reasoning-signature":
- case "redacted-reasoning":
  case "source":
  case "response-metadata":
  case "error": {
@@ -6356,12 +6436,13 @@ function runToolsTransformation({
  break;
  }
  case "file": {
- controller.enqueue(
- new DefaultGeneratedFileWithType({
+ controller.enqueue({
+ type: "file",
+ file: new DefaultGeneratedFileWithType({
  data: chunk.data,
  mediaType: chunk.mediaType
  })
- );
+ });
  break;
  }
  case "tool-call-delta": {
@@ -6471,7 +6552,7 @@ function runToolsTransformation({
  finishReason: chunk.finishReason,
  logprobs: chunk.logprobs,
  usage: calculateLanguageModelUsage2(chunk.usage),
- experimental_providerMetadata: chunk.providerMetadata
+ providerMetadata: chunk.providerMetadata
  };
  break;
  }
@@ -6537,8 +6618,7 @@ function streamText({
  experimental_output: output,
  experimental_continueSteps: continueSteps = false,
  experimental_telemetry: telemetry,
- experimental_providerMetadata,
- providerOptions = experimental_providerMetadata,
+ providerOptions,
  experimental_toolCallStreaming = false,
  toolCallStreaming = experimental_toolCallStreaming,
  experimental_activeTools: activeTools,
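With this hunk, streamText no longer accepts experimental_providerMetadata as a fallback for providerOptions; callers must pass providerOptions directly. Sketch of a call site, with a hypothetical provider-specific option:

// Before: streamText({ ..., experimental_providerMetadata: {...} }) still worked.
// After: only providerOptions is read.
const callOptions = {
  prompt: "Hello",
  providerOptions: {
    // "myProvider" and "someSetting" are placeholders, not real option names.
    myProvider: { someSetting: true },
  },
};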
@@ -6601,7 +6681,7 @@ function createOutputTransformStream(output) {
  partialOutput = void 0
  }) {
  controller.enqueue({
- part: { type: "text-delta", textDelta: textChunk },
+ part: { type: "text", text: textChunk },
  partialOutput
  });
  textChunk = "";
@@ -6611,12 +6691,12 @@ function createOutputTransformStream(output) {
  if (chunk.type === "step-finish") {
  publishTextChunk({ controller });
  }
- if (chunk.type !== "text-delta") {
+ if (chunk.type !== "text") {
  controller.enqueue({ part: chunk, partialOutput: void 0 });
  return;
  }
- text2 += chunk.textDelta;
- textChunk += chunk.textDelta;
+ text2 += chunk.text;
+ textChunk += chunk.text;
  const result = output.parsePartial({ text: text2 });
  if (result != null) {
  const currentJson = JSON.stringify(result.partial);
@@ -6711,44 +6791,44 @@ var DefaultStreamTextResult = class {
  async transform(chunk, controller) {
  controller.enqueue(chunk);
  const { part } = chunk;
- if (part.type === "text-delta" || part.type === "reasoning" || part.type === "source" || part.type === "tool-call" || part.type === "tool-result" || part.type === "tool-call-streaming-start" || part.type === "tool-call-delta") {
+ if (part.type === "text" || part.type === "reasoning" || part.type === "source" || part.type === "tool-call" || part.type === "tool-result" || part.type === "tool-call-streaming-start" || part.type === "tool-call-delta") {
  await (onChunk == null ? void 0 : onChunk({ chunk: part }));
  }
  if (part.type === "error") {
  await (onError == null ? void 0 : onError({ error: part.error }));
  }
- if (part.type === "text-delta") {
- recordedStepText += part.textDelta;
- recordedContinuationText += part.textDelta;
- recordedFullText += part.textDelta;
+ if (part.type === "text") {
+ recordedStepText += part.text;
+ recordedContinuationText += part.text;
+ recordedFullText += part.text;
  }
  if (part.type === "reasoning") {
- if (activeReasoningText == null) {
- activeReasoningText = { type: "text", text: part.textDelta };
- stepReasoning.push(activeReasoningText);
- } else {
- activeReasoningText.text += part.textDelta;
- }
- }
- if (part.type === "reasoning-signature") {
- if (activeReasoningText == null) {
- throw new AISDKError17({
- name: "InvalidStreamPart",
- message: "reasoning-signature without reasoning"
- });
+ if (part.reasoningType === "text") {
+ if (activeReasoningText == null) {
+ activeReasoningText = { type: "text", text: part.text };
+ stepReasoning.push(activeReasoningText);
+ } else {
+ activeReasoningText.text += part.text;
+ }
+ } else if (part.reasoningType === "signature") {
+ if (activeReasoningText == null) {
+ throw new AISDKError18({
+ name: "InvalidStreamPart",
+ message: "reasoning-signature without reasoning"
+ });
+ }
+ activeReasoningText.signature = part.signature;
+ activeReasoningText = void 0;
+ } else if (part.reasoningType === "redacted") {
+ stepReasoning.push({ type: "redacted", data: part.data });
  }
- activeReasoningText.signature = part.signature;
- activeReasoningText = void 0;
- }
- if (part.type === "redacted-reasoning") {
- stepReasoning.push({ type: "redacted", data: part.data });
  }
  if (part.type === "file") {
- stepFiles.push(part);
+ stepFiles.push(part.file);
  }
  if (part.type === "source") {
- recordedSources.push(part.source);
- recordedStepSources.push(part.source);
+ recordedSources.push(part);
+ recordedStepSources.push(part);
  }
  if (part.type === "tool-call") {
  recordedToolCalls.push(part);
@@ -6799,8 +6879,7 @@ var DefaultStreamTextResult = class {
  ...part.response,
  messages: [...recordedResponse.messages, ...stepMessages]
  },
- providerMetadata: part.experimental_providerMetadata,
- experimental_providerMetadata: part.experimental_providerMetadata,
+ providerMetadata: part.providerMetadata,
  isContinued: part.isContinued
  };
  await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
@@ -6841,9 +6920,7 @@ var DefaultStreamTextResult = class {
  self.responsePromise.resolve(lastStep.response);
  self.toolCallsPromise.resolve(lastStep.toolCalls);
  self.toolResultsPromise.resolve(lastStep.toolResults);
- self.providerMetadataPromise.resolve(
- lastStep.experimental_providerMetadata
- );
+ self.providerMetadataPromise.resolve(lastStep.providerMetadata);
  self.reasoningPromise.resolve(lastStep.reasoning);
  self.reasoningDetailsPromise.resolve(lastStep.reasoningDetails);
  const finishReason = recordedFinishReason != null ? recordedFinishReason : "unknown";
@@ -6873,7 +6950,6 @@ var DefaultStreamTextResult = class {
  response: lastStep.response,
  warnings: lastStep.warnings,
  providerMetadata: lastStep.providerMetadata,
- experimental_providerMetadata: lastStep.experimental_providerMetadata,
  steps: recordedSteps
  }));
  rootSpan.setAttributes(
@@ -7017,7 +7093,7 @@ var DefaultStreamTextResult = class {
  "gen_ai.system": model.provider,
  "gen_ai.request.model": model.modelId,
  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
- "gen_ai.request.max_tokens": settings.maxTokens,
+ "gen_ai.request.max_tokens": settings.maxOutputTokens,
  "gen_ai.request.presence_penalty": settings.presencePenalty,
  "gen_ai.request.stop_sequences": settings.stopSequences,
  "gen_ai.request.temperature": settings.temperature,
@@ -7086,10 +7162,10 @@ var DefaultStreamTextResult = class {
  chunk
  }) {
  controller.enqueue(chunk);
- stepText += chunk.textDelta;
- fullStepText += chunk.textDelta;
+ stepText += chunk.text;
+ fullStepText += chunk.text;
  chunkTextPublished = true;
- hasWhitespaceSuffix = chunk.textDelta.trimEnd() !== chunk.textDelta;
+ hasWhitespaceSuffix = chunk.text.trimEnd() !== chunk.text;
  }
  self.addStream(
  transformedStream.pipeThrough(
@@ -7112,14 +7188,14 @@ var DefaultStreamTextResult = class {
  warnings: warnings != null ? warnings : []
  });
  }
- if (chunk.type === "text-delta" && chunk.textDelta.length === 0) {
+ if (chunk.type === "text" && chunk.text.length === 0) {
  return;
  }
  const chunkType = chunk.type;
  switch (chunkType) {
- case "text-delta": {
+ case "text": {
  if (continueSteps) {
- const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.textDelta.trimStart() : chunk.textDelta;
+ const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.text.trimStart() : chunk.text;
  if (trimmedChunkText.length === 0) {
  break;
  }
@@ -7131,8 +7207,8 @@ var DefaultStreamTextResult = class {
  await publishTextChunk({
  controller,
  chunk: {
- type: "text-delta",
- textDelta: split.prefix + split.whitespace
+ type: "text",
+ text: split.prefix + split.whitespace
  }
  });
  }
@@ -7143,35 +7219,31 @@ var DefaultStreamTextResult = class {
  }
  case "reasoning": {
  controller.enqueue(chunk);
- if (activeReasoningText2 == null) {
- activeReasoningText2 = {
- type: "text",
- text: chunk.textDelta
- };
- stepReasoning2.push(activeReasoningText2);
- } else {
- activeReasoningText2.text += chunk.textDelta;
- }
- break;
- }
- case "reasoning-signature": {
- controller.enqueue(chunk);
- if (activeReasoningText2 == null) {
- throw new InvalidStreamPartError({
- chunk,
- message: "reasoning-signature without reasoning"
+ if (chunk.reasoningType === "text") {
+ if (activeReasoningText2 == null) {
+ activeReasoningText2 = {
+ type: "text",
+ text: chunk.text
+ };
+ stepReasoning2.push(activeReasoningText2);
+ } else {
+ activeReasoningText2.text += chunk.text;
+ }
+ } else if (chunk.reasoningType === "signature") {
+ if (activeReasoningText2 == null) {
+ throw new InvalidStreamPartError({
+ chunk,
+ message: "reasoning-signature without reasoning"
+ });
+ }
+ activeReasoningText2.signature = chunk.signature;
+ activeReasoningText2 = void 0;
+ } else if (chunk.reasoningType === "redacted") {
+ stepReasoning2.push({
+ type: "redacted",
+ data: chunk.data
  });
  }
- activeReasoningText2.signature = chunk.signature;
- activeReasoningText2 = void 0;
- break;
- }
- case "redacted-reasoning": {
- controller.enqueue(chunk);
- stepReasoning2.push({
- type: "redacted",
- data: chunk.data
- });
  break;
  }
  case "tool-call": {
@@ -7195,7 +7267,7 @@ var DefaultStreamTextResult = class {
  case "finish": {
  stepUsage = chunk.usage;
  stepFinishReason = chunk.finishReason;
- stepProviderMetadata = chunk.experimental_providerMetadata;
+ stepProviderMetadata = chunk.providerMetadata;
  stepLogProbs = chunk.logprobs;
  const msToFinish = now2() - startTimestampMs;
  doStreamSpan.addEvent("ai.stream.finish");
@@ -7206,7 +7278,7 @@ var DefaultStreamTextResult = class {
  break;
  }
  case "file": {
- stepFiles2.push(chunk);
+ stepFiles2.push(chunk.file);
  controller.enqueue(chunk);
  break;
  }
@@ -7247,10 +7319,7 @@ var DefaultStreamTextResult = class {
  stepType2 === "continue" && !chunkTextPublished)) {
  await publishTextChunk({
  controller,
- chunk: {
- type: "text-delta",
- textDelta: chunkBuffer
- }
+ chunk: { type: "text", text: chunkBuffer }
  });
  chunkBuffer = "";
  }
@@ -7287,7 +7356,6 @@ var DefaultStreamTextResult = class {
  finishReason: stepFinishReason,
  usage: stepUsage,
  providerMetadata: stepProviderMetadata,
- experimental_providerMetadata: stepProviderMetadata,
  logprobs: stepLogProbs,
  request: stepRequest,
  response: {
@@ -7305,7 +7373,6 @@ var DefaultStreamTextResult = class {
  finishReason: stepFinishReason,
  usage: combinedUsage,
  providerMetadata: stepProviderMetadata,
- experimental_providerMetadata: stepProviderMetadata,
  logprobs: stepLogProbs,
  response: {
  ...stepResponse,
@@ -7391,9 +7458,6 @@ var DefaultStreamTextResult = class {
  get finishReason() {
  return this.finishReasonPromise.value;
  }
- get experimental_providerMetadata() {
- return this.providerMetadataPromise.value;
- }
  get providerMetadata() {
  return this.providerMetadataPromise.value;
  }
@@ -7445,8 +7509,8 @@ var DefaultStreamTextResult = class {
  this.teeStream().pipeThrough(
  new TransformStream({
  transform({ part }, controller) {
- if (part.type === "text-delta") {
- controller.enqueue(part.textDelta);
+ if (part.type === "text") {
+ controller.enqueue(part.text);
  }
  }
  })
@@ -7504,52 +7568,45 @@ var DefaultStreamTextResult = class {
  transform: async (chunk, controller) => {
  const chunkType = chunk.type;
  switch (chunkType) {
- case "text-delta": {
- controller.enqueue(formatDataStreamPart("text", chunk.textDelta));
+ case "text": {
+ controller.enqueue(formatDataStreamPart("text", chunk.text));
  break;
  }
  case "reasoning": {
  if (sendReasoning) {
- controller.enqueue(
- formatDataStreamPart("reasoning", chunk.textDelta)
- );
- }
- break;
- }
- case "redacted-reasoning": {
- if (sendReasoning) {
- controller.enqueue(
- formatDataStreamPart("redacted_reasoning", {
- data: chunk.data
- })
- );
- }
- break;
- }
- case "reasoning-signature": {
- if (sendReasoning) {
- controller.enqueue(
- formatDataStreamPart("reasoning_signature", {
- signature: chunk.signature
- })
- );
+ if (chunk.reasoningType === "text") {
+ controller.enqueue(
+ formatDataStreamPart("reasoning", chunk.text)
+ );
+ } else if (chunk.reasoningType === "signature") {
+ controller.enqueue(
+ formatDataStreamPart("reasoning_signature", {
+ signature: chunk.signature
+ })
+ );
+ } else if (chunk.reasoningType === "redacted") {
+ controller.enqueue(
+ formatDataStreamPart("redacted_reasoning", {
+ data: chunk.data
+ })
+ );
+ }
  }
  break;
  }
  case "file": {
  controller.enqueue(
+ // TODO update protocol to v2 or replace with event stream
  formatDataStreamPart("file", {
- mimeType: chunk.mediaType,
- data: chunk.base64
+ mimeType: chunk.file.mediaType,
+ data: chunk.file.base64
  })
  );
  break;
  }
  case "source": {
  if (sendSources) {
- controller.enqueue(
- formatDataStreamPart("source", chunk.source)
- );
+ controller.enqueue(formatDataStreamPart("source", chunk));
  }
  break;
  }
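In the data stream protocol hunk above, the three former chunk types (reasoning, reasoning-signature, redacted-reasoning) collapse into one "reasoning" chunk that is fanned out by reasoningType onto the same three wire parts as before. A hypothetical helper summarizing the mapping (illustration only, not an export of this package):

function wirePartFor(chunk) {
  switch (chunk.reasoningType) {
    case "text":
      return ["reasoning", chunk.text];
    case "signature":
      return ["reasoning_signature", { signature: chunk.signature }];
    case "redacted":
      return ["redacted_reasoning", { data: chunk.data }];
  }
}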
@@ -7744,8 +7801,8 @@ var DefaultStreamTextResult = class {
  };

  // errors/no-transcript-generated-error.ts
- import { AISDKError as AISDKError18 } from "@ai-sdk/provider";
- var NoTranscriptGeneratedError = class extends AISDKError18 {
+ import { AISDKError as AISDKError19 } from "@ai-sdk/provider";
+ var NoTranscriptGeneratedError = class extends AISDKError19 {
  constructor(options) {
  super({
  name: "AI_NoTranscriptGeneratedError",
@@ -7894,15 +7951,15 @@ function extractReasoningMiddleware({
  wrapGenerate: async ({ doGenerate }) => {
  const { text: rawText, ...rest } = await doGenerate();
  if (rawText == null) {
- return { text: rawText, ...rest };
+ return { text: void 0, ...rest };
  }
- const text2 = startWithReasoning ? openingTag + rawText : rawText;
+ const text2 = startWithReasoning ? openingTag + rawText.text : rawText.text;
  const regexp = new RegExp(`${openingTag}(.*?)${closingTag}`, "gs");
  const matches = Array.from(text2.matchAll(regexp));
  if (!matches.length) {
- return { text: text2, ...rest };
+ return { text: { type: "text", text: text2 }, ...rest };
  }
- const reasoning = matches.map((match) => match[1]).join(separator);
+ const reasoningText = matches.map((match) => match[1]).join(separator);
  let textWithoutReasoning = text2;
  for (let i = matches.length - 1; i >= 0; i--) {
  const match = matches[i];
@@ -7912,7 +7969,17 @@ function extractReasoningMiddleware({
  );
  textWithoutReasoning = beforeMatch + (beforeMatch.length > 0 && afterMatch.length > 0 ? separator : "") + afterMatch;
  }
- return { ...rest, text: textWithoutReasoning, reasoning };
+ return {
+ ...rest,
+ text: { type: "text", text: textWithoutReasoning },
+ reasoning: reasoningText.length > 0 ? [
+ {
+ type: "reasoning",
+ reasoningType: "text",
+ text: reasoningText
+ }
+ ] : void 0
+ };
  },
  wrapStream: async ({ doStream }) => {
  const { stream, ...rest } = await doStream();
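The wrapGenerate changes above reflect the new result shape: text is an object ({ type: "text", text }) and reasoning is an array of reasoningType-tagged parts rather than a string. The tag extraction itself is unchanged; a standalone sketch of it, with an assumed <think> tag name:

const openingTag = "<think>"; // assumed tag, as configured by the caller
const closingTag = "</think>";
const raw = "<think>plan the answer</think>The answer is 42.";

const regexp = new RegExp(`${openingTag}(.*?)${closingTag}`, "gs");
const matches = Array.from(raw.matchAll(regexp));
const reasoningText = matches.map((m) => m[1]).join("\n");
const text = raw.replace(regexp, "");

console.log({ reasoningText, text });
// { reasoningText: 'plan the answer', text: 'The answer is 42.' }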
@@ -7925,18 +7992,24 @@ function extractReasoningMiddleware({
  stream: stream.pipeThrough(
  new TransformStream({
  transform: (chunk, controller) => {
- if (chunk.type !== "text-delta") {
+ if (chunk.type !== "text") {
  controller.enqueue(chunk);
  return;
  }
- buffer += chunk.textDelta;
+ buffer += chunk.text;
  function publish(text2) {
  if (text2.length > 0) {
  const prefix = afterSwitch && (isReasoning ? !isFirstReasoning : !isFirstText) ? separator : "";
- controller.enqueue({
- type: isReasoning ? "reasoning" : "text-delta",
- textDelta: prefix + text2
- });
+ controller.enqueue(
+ isReasoning ? {
+ type: "reasoning",
+ reasoningType: "text",
+ text: prefix + text2
+ } : {
+ type: "text",
+ text: prefix + text2
+ }
+ );
  afterSwitch = false;
  if (isReasoning) {
  isFirstReasoning = false;
@@ -7983,43 +8056,12 @@ function simulateStreamingMiddleware() {
  start(controller) {
  controller.enqueue({ type: "response-metadata", ...result.response });
  if (result.reasoning) {
- if (typeof result.reasoning === "string") {
- controller.enqueue({
- type: "reasoning",
- textDelta: result.reasoning
- });
- } else {
- for (const reasoning of result.reasoning) {
- switch (reasoning.type) {
- case "text": {
- controller.enqueue({
- type: "reasoning",
- textDelta: reasoning.text
- });
- if (reasoning.signature != null) {
- controller.enqueue({
- type: "reasoning-signature",
- signature: reasoning.signature
- });
- }
- break;
- }
- case "redacted": {
- controller.enqueue({
- type: "redacted-reasoning",
- data: reasoning.data
- });
- break;
- }
- }
- }
+ for (const reasoningPart of result.reasoning) {
+ controller.enqueue(reasoningPart);
  }
  }
  if (result.text) {
- controller.enqueue({
- type: "text-delta",
- textDelta: result.text
- });
+ controller.enqueue(result.text);
  }
  if (result.toolCalls) {
  for (const toolCall of result.toolCalls) {
@@ -8030,10 +8072,7 @@ function simulateStreamingMiddleware() {
  toolName: toolCall.toolName,
  argsTextDelta: toolCall.args
  });
- controller.enqueue({
- type: "tool-call",
- ...toolCall
- });
+ controller.enqueue(toolCall);
  }
  }
  controller.enqueue({
@@ -8121,7 +8160,7 @@ function appendClientMessage({
  }

  // core/prompt/append-response-messages.ts
- import { AISDKError as AISDKError19 } from "@ai-sdk/provider";
+ import { AISDKError as AISDKError20 } from "@ai-sdk/provider";
  function appendResponseMessages({
  messages,
  responseMessages,
@@ -8204,7 +8243,7 @@ function appendResponseMessages({
  break;
  case "file":
  if (part.data instanceof URL) {
- throw new AISDKError19({
+ throw new AISDKError20({
  name: "InvalidAssistantFileData",
  message: "File data cannot be a URL"
  });
@@ -8338,7 +8377,7 @@ function customProvider({
  var experimental_customProvider = customProvider;

  // core/registry/no-such-provider-error.ts
- import { AISDKError as AISDKError20, NoSuchModelError as NoSuchModelError3 } from "@ai-sdk/provider";
+ import { AISDKError as AISDKError21, NoSuchModelError as NoSuchModelError3 } from "@ai-sdk/provider";
  var name16 = "AI_NoSuchProviderError";
  var marker16 = `vercel.ai.error.${name16}`;
  var symbol16 = Symbol.for(marker16);
@@ -8357,7 +8396,7 @@ var NoSuchProviderError = class extends NoSuchModelError3 {
  this.availableProviders = availableProviders;
  }
  static isInstance(error) {
- return AISDKError20.hasMarker(error, marker16);
+ return AISDKError21.hasMarker(error, marker16);
  }
  };
  _a16 = symbol16;
@@ -8408,7 +8447,7 @@ var DefaultProviderRegistry = class {
  message: `Invalid ${modelType} id for registry: ${id} (must be in the format "providerId${this.separator}modelId")`
  });
  }
- return [id.slice(0, index), id.slice(index + 1)];
+ return [id.slice(0, index), id.slice(index + this.separator.length)];
  }
  languageModel(id) {
  var _a17, _b;
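The one-line registry fix above matters for custom separators longer than one character: the old `index + 1` slice left part of the separator attached to the model id. Sketch with a hypothetical " > " separator:

const separator = " > ";
const id = "openai > gpt-4o";
const index = id.indexOf(separator);

const oldSplit = [id.slice(0, index), id.slice(index + 1)];
const newSplit = [id.slice(0, index), id.slice(index + separator.length)];

console.log(oldSplit); // [ 'openai', '> gpt-4o' ]  <- separator residue
console.log(newSplit); // [ 'openai', 'gpt-4o' ]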
@@ -8928,6 +8967,7 @@ var MCPClient = class {
  async tools({
  schemas = "automatic"
  } = {}) {
+ var _a17;
  const tools = {};
  try {
  const listToolsResult = await this.listTools();
@@ -8935,14 +8975,18 @@ var MCPClient = class {
  if (schemas !== "automatic" && !(name17 in schemas)) {
  continue;
  }
- const parameters = schemas === "automatic" ? jsonSchema(inputSchema) : schemas[name17].parameters;
+ const parameters = schemas === "automatic" ? jsonSchema({
+ ...inputSchema,
+ properties: (_a17 = inputSchema.properties) != null ? _a17 : {},
+ additionalProperties: false
+ }) : schemas[name17].parameters;
  const self = this;
  const toolWithExecute = tool({
  description,
  parameters,
  execute: async (args, options) => {
- var _a17;
- (_a17 = options == null ? void 0 : options.abortSignal) == null ? void 0 : _a17.throwIfAborted();
+ var _a18;
+ (_a18 = options == null ? void 0 : options.abortSignal) == null ? void 0 : _a18.throwIfAborted();
  return self.callTool({
  name: name17,
  args,
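The tools() change above normalizes automatic MCP input schemas before they reach jsonSchema(): a missing properties map is filled in and additionalProperties is forced to false. Standalone sketch of the normalization:

// A tool whose MCP inputSchema omits `properties` entirely.
const inputSchema = { type: "object" };

const normalized = {
  ...inputSchema,
  properties: inputSchema.properties != null ? inputSchema.properties : {},
  additionalProperties: false,
};

console.log(normalized);
// { type: 'object', properties: {}, additionalProperties: false }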
@@ -8995,7 +9039,7 @@ var MCPClient = class {
  };

  // core/util/cosine-similarity.ts
- function cosineSimilarity(vector1, vector2, options) {
+ function cosineSimilarity(vector1, vector2) {
  if (vector1.length !== vector2.length) {
  throw new InvalidArgumentError({
  parameter: "vector1,vector2",
@@ -9005,13 +9049,6 @@ function cosineSimilarity(vector1, vector2, options) {
  }
  const n = vector1.length;
  if (n === 0) {
- if (options == null ? void 0 : options.throwErrorForEmptyVectors) {
- throw new InvalidArgumentError({
- parameter: "vector1",
- value: vector1,
- message: "Vectors cannot be empty"
- });
- }
  return 0;
  }
  let magnitudeSquared1 = 0;
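cosineSimilarity drops its options parameter above: throwErrorForEmptyVectors is gone, so empty inputs now always return 0, while mismatched lengths still throw. A standalone sketch of the math (the zero-magnitude guard is an assumption, not necessarily the package's exact behavior):

function cosine(a, b) {
  if (a.length !== b.length) throw new Error("vectors must have the same length");
  if (a.length === 0) return 0; // matches the new behavior in this diff
  let dot = 0, mag1 = 0, mag2 = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    mag1 += a[i] * a[i];
    mag2 += b[i] * b[i];
  }
  const denom = Math.sqrt(mag1) * Math.sqrt(mag2);
  return denom === 0 ? 0 : dot / denom; // guard is an assumption
}

console.log(cosine([1, 0], [1, 0])); // 1
console.log(cosine([], []));         // 0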
@@ -9282,7 +9319,7 @@ var StreamData = class {
  }
  };
  export {
- AISDKError16 as AISDKError,
+ AISDKError17 as AISDKError,
  APICallError2 as APICallError,
  DownloadError,
  EmptyResponseBodyError,