ai 5.0.0-canary.6 → 5.0.0-canary.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -30,28 +30,28 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
  // index.ts
  var ai_exports = {};
  __export(ai_exports, {
- AISDKError: () => import_provider20.AISDKError,
- APICallError: () => import_provider20.APICallError,
+ AISDKError: () => import_provider21.AISDKError,
+ APICallError: () => import_provider21.APICallError,
  DownloadError: () => DownloadError,
- EmptyResponseBodyError: () => import_provider20.EmptyResponseBodyError,
+ EmptyResponseBodyError: () => import_provider21.EmptyResponseBodyError,
  InvalidArgumentError: () => InvalidArgumentError,
  InvalidDataContentError: () => InvalidDataContentError,
  InvalidMessageRoleError: () => InvalidMessageRoleError,
- InvalidPromptError: () => import_provider20.InvalidPromptError,
- InvalidResponseDataError: () => import_provider20.InvalidResponseDataError,
+ InvalidPromptError: () => import_provider21.InvalidPromptError,
+ InvalidResponseDataError: () => import_provider21.InvalidResponseDataError,
  InvalidStreamPartError: () => InvalidStreamPartError,
  InvalidToolArgumentsError: () => InvalidToolArgumentsError,
- JSONParseError: () => import_provider20.JSONParseError,
+ JSONParseError: () => import_provider21.JSONParseError,
  LangChainAdapter: () => langchain_adapter_exports,
  LlamaIndexAdapter: () => llamaindex_adapter_exports,
- LoadAPIKeyError: () => import_provider20.LoadAPIKeyError,
+ LoadAPIKeyError: () => import_provider21.LoadAPIKeyError,
  MCPClientError: () => MCPClientError,
  MessageConversionError: () => MessageConversionError,
- NoContentGeneratedError: () => import_provider20.NoContentGeneratedError,
+ NoContentGeneratedError: () => import_provider21.NoContentGeneratedError,
  NoImageGeneratedError: () => NoImageGeneratedError,
  NoObjectGeneratedError: () => NoObjectGeneratedError,
  NoOutputSpecifiedError: () => NoOutputSpecifiedError,
- NoSuchModelError: () => import_provider20.NoSuchModelError,
+ NoSuchModelError: () => import_provider21.NoSuchModelError,
  NoSuchProviderError: () => NoSuchProviderError,
  NoSuchToolError: () => NoSuchToolError,
  Output: () => output_exports,
@@ -59,8 +59,8 @@ __export(ai_exports, {
  StreamData: () => StreamData,
  ToolCallRepairError: () => ToolCallRepairError,
  ToolExecutionError: () => ToolExecutionError,
- TypeValidationError: () => import_provider20.TypeValidationError,
- UnsupportedFunctionalityError: () => import_provider20.UnsupportedFunctionalityError,
+ TypeValidationError: () => import_provider21.TypeValidationError,
+ UnsupportedFunctionalityError: () => import_provider21.UnsupportedFunctionalityError,
  appendClientMessage: () => appendClientMessage,
  appendResponseMessages: () => appendResponseMessages,
  asSchema: () => asSchema,
@@ -75,7 +75,7 @@ __export(ai_exports, {
  cosineSimilarity: () => cosineSimilarity,
  createDataStream: () => createDataStream,
  createDataStreamResponse: () => createDataStreamResponse,
- createIdGenerator: () => import_provider_utils21.createIdGenerator,
+ createIdGenerator: () => import_provider_utils20.createIdGenerator,
  createProviderRegistry: () => createProviderRegistry,
  customProvider: () => customProvider,
  defaultSettingsMiddleware: () => defaultSettingsMiddleware,
@@ -91,7 +91,7 @@ __export(ai_exports, {
  extractReasoningMiddleware: () => extractReasoningMiddleware,
  fillMessageParts: () => fillMessageParts,
  formatDataStreamPart: () => formatDataStreamPart,
- generateId: () => import_provider_utils21.generateId,
+ generateId: () => import_provider_utils20.generateId,
  generateObject: () => generateObject,
  generateText: () => generateText,
  getMessageParts: () => getMessageParts,
@@ -119,7 +119,7 @@ __export(ai_exports, {
  module.exports = __toCommonJS(ai_exports);

  // core/index.ts
- var import_provider_utils21 = require("@ai-sdk/provider-utils");
+ var import_provider_utils20 = require("@ai-sdk/provider-utils");

  // core/util/index.ts
  var import_provider_utils5 = require("@ai-sdk/provider-utils");
@@ -1536,7 +1536,10 @@ function isSchema(value) {
  return typeof value === "object" && value !== null && schemaSymbol in value && value[schemaSymbol] === true && "jsonSchema" in value && "validate" in value;
  }
  function asSchema(schema) {
- return isSchema(schema) ? schema : zodSchema(schema);
+ return schema == null ? jsonSchema({
+ properties: {},
+ additionalProperties: false
+ }) : isSchema(schema) ? schema : zodSchema(schema);
  }

  // core/util/should-resubmit-messages.ts
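Note on the `asSchema` hunk above: the schema argument is now optional, and a `null`/`undefined` schema falls back to an empty, closed JSON schema instead of being handed to `zodSchema`. A minimal sketch of the new behavior, assuming the public `asSchema`/`jsonSchema` exports of this canary:

```ts
import { asSchema, jsonSchema } from 'ai';
import { z } from 'zod';

// Explicit schemas behave as before: zod schemas are wrapped, ai schemas pass through.
const citySchema = asSchema(z.object({ city: z.string() }));

// New in this version: a missing schema resolves to an empty, closed object schema,
// equivalent to jsonSchema({ properties: {}, additionalProperties: false }).
const fallbackSchema = asSchema(undefined);
```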
@@ -2104,6 +2107,7 @@ function selectTelemetryAttributes({
  async function embed({
  model,
  value,
+ providerOptions,
  maxRetries: maxRetriesArg,
  abortSignal,
  headers,
@@ -2129,7 +2133,7 @@ async function embed({
  }),
  tracer,
  fn: async (span) => {
- const { embedding, usage, rawResponse } = await retry(
+ const { embedding, usage, response } = await retry(
  () => (
  // nested spans to align with the embedMany telemetry data:
  recordSpan({
@@ -2152,7 +2156,8 @@ async function embed({
  const modelResponse = await model.doEmbed({
  values: [value],
  abortSignal,
- headers
+ headers,
+ providerOptions
  });
  const embedding2 = modelResponse.embeddings[0];
  const usage2 = (_a17 = modelResponse.usage) != null ? _a17 : { tokens: NaN };
@@ -2172,7 +2177,7 @@ async function embed({
  return {
  embedding: embedding2,
  usage: usage2,
- rawResponse: modelResponse.rawResponse
+ response: modelResponse.response
  };
  }
  })
@@ -2187,7 +2192,12 @@ async function embed({
  }
  })
  );
- return new DefaultEmbedResult({ value, embedding, usage, rawResponse });
+ return new DefaultEmbedResult({
+ value,
+ embedding,
+ usage,
+ response
+ });
  }
  });
  }
@@ -2196,7 +2206,7 @@ var DefaultEmbedResult = class {
  this.value = options.value;
  this.embedding = options.embedding;
  this.usage = options.usage;
- this.rawResponse = options.rawResponse;
+ this.response = options.response;
  }
  };

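The `embed` hunks above add a `providerOptions` passthrough to the model's `doEmbed` call and rename the result field `rawResponse` to `response`. A usage sketch, assuming the `@ai-sdk/openai` embedding model; the option name shown is hypothetical, not part of this diff:

```ts
import { embed } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await embed({
  model: openai.embedding('text-embedding-3-small'),
  value: 'sunny day at the beach',
  // forwarded verbatim to doEmbed; this particular option is illustrative
  providerOptions: { openai: { dimensions: 512 } },
});

// previously result.rawResponse; the provider response now lives on result.response
console.log(result.embedding.length, result.response?.headers);
```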
@@ -2219,6 +2229,7 @@ async function embedMany({
  maxRetries: maxRetriesArg,
  abortSignal,
  headers,
+ providerOptions,
  experimental_telemetry: telemetry
  }) {
  const { maxRetries, retry } = prepareRetries({ maxRetries: maxRetriesArg });
@@ -2246,7 +2257,7 @@ async function embedMany({
  fn: async (span) => {
  const maxEmbeddingsPerCall = model.maxEmbeddingsPerCall;
  if (maxEmbeddingsPerCall == null) {
- const { embeddings: embeddings2, usage } = await retry(() => {
+ const { embeddings: embeddings2, usage, response } = await retry(() => {
  return recordSpan({
  name: "ai.embedMany.doEmbed",
  attributes: selectTelemetryAttributes({
@@ -2269,7 +2280,8 @@ async function embedMany({
  const modelResponse = await model.doEmbed({
  values,
  abortSignal,
- headers
+ headers,
+ providerOptions
  });
  const embeddings3 = modelResponse.embeddings;
  const usage2 = (_a17 = modelResponse.usage) != null ? _a17 : { tokens: NaN };
@@ -2284,7 +2296,11 @@ async function embedMany({
  }
  })
  );
- return { embeddings: embeddings3, usage: usage2 };
+ return {
+ embeddings: embeddings3,
+ usage: usage2,
+ response: modelResponse.response
+ };
  }
  });
  });
@@ -2299,13 +2315,23 @@ async function embedMany({
  }
  })
  );
- return new DefaultEmbedManyResult({ values, embeddings: embeddings2, usage });
+ return new DefaultEmbedManyResult({
+ values,
+ embeddings: embeddings2,
+ usage,
+ responses: [response]
+ });
  }
  const valueChunks = splitArray(values, maxEmbeddingsPerCall);
  const embeddings = [];
+ const responses = [];
  let tokens = 0;
  for (const chunk of valueChunks) {
- const { embeddings: responseEmbeddings, usage } = await retry(() => {
+ const {
+ embeddings: responseEmbeddings,
+ usage,
+ response
+ } = await retry(() => {
  return recordSpan({
  name: "ai.embedMany.doEmbed",
  attributes: selectTelemetryAttributes({
@@ -2328,7 +2354,8 @@ async function embedMany({
  const modelResponse = await model.doEmbed({
  values: chunk,
  abortSignal,
- headers
+ headers,
+ providerOptions
  });
  const embeddings2 = modelResponse.embeddings;
  const usage2 = (_a17 = modelResponse.usage) != null ? _a17 : { tokens: NaN };
@@ -2343,11 +2370,16 @@ async function embedMany({
  }
  })
  );
- return { embeddings: embeddings2, usage: usage2 };
+ return {
+ embeddings: embeddings2,
+ usage: usage2,
+ response: modelResponse.response
+ };
  }
  });
  });
  embeddings.push(...responseEmbeddings);
+ responses.push(response);
  tokens += usage.tokens;
  }
  span.setAttributes(
@@ -2364,7 +2396,8 @@ async function embedMany({
  return new DefaultEmbedManyResult({
  values,
  embeddings,
- usage: { tokens }
+ usage: { tokens },
+ responses
  });
  }
  });
@@ -2374,6 +2407,7 @@ var DefaultEmbedManyResult = class {
  this.values = options.values;
  this.embeddings = options.embeddings;
  this.usage = options.usage;
+ this.responses = options.responses;
  }
  };

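`embedMany` gets the same treatment, plus a `responses` array on the result: one provider response per underlying `doEmbed` call (a single entry when the model has no `maxEmbeddingsPerCall` limit, one per chunk otherwise). A sketch under the same assumptions as above:

```ts
import { embedMany } from 'ai';
import { openai } from '@ai-sdk/openai';

const { embeddings, usage, responses } = await embedMany({
  model: openai.embedding('text-embedding-3-small'),
  values: ['first', 'second', 'third'],
});

// responses.length equals the number of underlying doEmbed calls
console.log(embeddings.length, usage.tokens, responses?.length);
```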
@@ -2629,8 +2663,8 @@ var DefaultGenerateImageResult = class {
  };

  // core/generate-object/generate-object.ts
- var import_provider12 = require("@ai-sdk/provider");
- var import_provider_utils12 = require("@ai-sdk/provider-utils");
+ var import_provider13 = require("@ai-sdk/provider");
+ var import_provider_utils11 = require("@ai-sdk/provider-utils");

  // errors/no-object-generated-error.ts
  var import_provider5 = require("@ai-sdk/provider");
@@ -2660,9 +2694,6 @@ var NoObjectGeneratedError = class extends import_provider5.AISDKError {
  };
  _a4 = symbol4;

- // core/prompt/convert-to-language-model-prompt.ts
- var import_provider_utils9 = require("@ai-sdk/provider-utils");
-
  // util/download-error.ts
  var import_provider6 = require("@ai-sdk/provider");
  var name5 = "AI_DownloadError";
@@ -2715,7 +2746,9 @@ async function download({ url }) {
  }

  // core/prompt/data-content.ts
+ var import_provider8 = require("@ai-sdk/provider");
  var import_provider_utils8 = require("@ai-sdk/provider-utils");
+ var import_zod = require("zod");

  // core/prompt/invalid-data-content-error.ts
  var import_provider7 = require("@ai-sdk/provider");
@@ -2739,8 +2772,23 @@ var InvalidDataContentError = class extends import_provider7.AISDKError {
  };
  _a6 = symbol6;

+ // core/prompt/split-data-url.ts
+ function splitDataUrl(dataUrl) {
+ try {
+ const [header, base64Content] = dataUrl.split(",");
+ return {
+ mediaType: header.split(";")[0].split(":")[1],
+ base64Content
+ };
+ } catch (error) {
+ return {
+ mediaType: void 0,
+ base64Content: void 0
+ };
+ }
+ }
+
  // core/prompt/data-content.ts
- var import_zod = require("zod");
  var dataContentSchema = import_zod.z.union([
  import_zod.z.string(),
  import_zod.z.instanceof(Uint8Array),
@@ -2754,6 +2802,33 @@ var dataContentSchema = import_zod.z.union([
  { message: "Must be a Buffer" }
  )
  ]);
+ function convertToLanguageModelV2DataContent(content) {
+ if (content instanceof Uint8Array) {
+ return { data: content, mediaType: void 0 };
+ }
+ if (content instanceof ArrayBuffer) {
+ return { data: new Uint8Array(content), mediaType: void 0 };
+ }
+ if (typeof content === "string") {
+ try {
+ content = new URL(content);
+ } catch (error) {
+ }
+ }
+ if (content instanceof URL && content.protocol === "data:") {
+ const { mediaType: dataUrlMediaType, base64Content } = splitDataUrl(
+ content.toString()
+ );
+ if (dataUrlMediaType == null || base64Content == null) {
+ throw new import_provider8.AISDKError({
+ name: "InvalidDataContentError",
+ message: `Invalid data URL format in content ${content.toString()}`
+ });
+ }
+ return { data: base64Content, mediaType: dataUrlMediaType };
+ }
+ return { data: content, mediaType: void 0 };
+ }
  function convertDataContentToBase64String(content) {
  if (typeof content === "string") {
  return content;
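The new `convertToLanguageModelV2DataContent` helper above centralizes content normalization: strings are promoted to `URL` when they parse as one, `data:` URLs are split into a media type plus base64 payload, and everything else passes through with an undefined media type. A paraphrased sketch of its observable contract (these types are internal and not exported):

```ts
// convertToLanguageModelV2DataContent("data:image/png;base64,iVBOR...")
//   -> { data: "iVBOR...", mediaType: "image/png" }
// convertToLanguageModelV2DataContent("https://example.com/a.png")
//   -> { data: new URL("https://example.com/a.png"), mediaType: undefined }
// convertToLanguageModelV2DataContent(new Uint8Array([1, 2, 3]))
//   -> { data: Uint8Array [1, 2, 3], mediaType: undefined }
// Malformed data: URLs throw an AISDKError named "InvalidDataContentError".
type LanguageModelV2DataContent = string | Uint8Array | URL;
type Normalized = { data: LanguageModelV2DataContent; mediaType: string | undefined };
```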
@@ -2792,12 +2867,12 @@ function convertUint8ArrayToText(uint8Array) {
  }

  // core/prompt/invalid-message-role-error.ts
- var import_provider8 = require("@ai-sdk/provider");
+ var import_provider9 = require("@ai-sdk/provider");
  var name7 = "AI_InvalidMessageRoleError";
  var marker7 = `vercel.ai.error.${name7}`;
  var symbol7 = Symbol.for(marker7);
  var _a7;
- var InvalidMessageRoleError = class extends import_provider8.AISDKError {
+ var InvalidMessageRoleError = class extends import_provider9.AISDKError {
  constructor({
  role,
  message = `Invalid message role: '${role}'. Must be one of: "system", "user", "assistant", "tool".`
@@ -2807,27 +2882,11 @@ var InvalidMessageRoleError = class extends import_provider8.AISDKError {
  this.role = role;
  }
  static isInstance(error) {
- return import_provider8.AISDKError.hasMarker(error, marker7);
+ return import_provider9.AISDKError.hasMarker(error, marker7);
  }
  };
  _a7 = symbol7;

- // core/prompt/split-data-url.ts
- function splitDataUrl(dataUrl) {
- try {
- const [header, base64Content] = dataUrl.split(",");
- return {
- mediaType: header.split(";")[0].split(":")[1],
- base64Content
- };
- } catch (error) {
- return {
- mediaType: void 0,
- base64Content: void 0
- };
- }
- }
-
  // core/prompt/convert-to-language-model-prompt.ts
  async function convertToLanguageModelPrompt({
  prompt,
@@ -2849,14 +2908,13 @@ async function convertToLanguageModelPrompt({
  ];
  }
  function convertToLanguageModelMessage(message, downloadedAssets) {
- var _a17, _b, _c, _d, _e, _f;
  const role = message.role;
  switch (role) {
  case "system": {
  return {
  role: "system",
  content: message.content,
- providerOptions: (_a17 = message.providerOptions) != null ? _a17 : message.experimental_providerMetadata
+ providerOptions: message.providerOptions
  };
  }
  case "user": {
@@ -2864,13 +2922,13 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
  return {
  role: "user",
  content: [{ type: "text", text: message.content }],
- providerOptions: (_b = message.providerOptions) != null ? _b : message.experimental_providerMetadata
+ providerOptions: message.providerOptions
  };
  }
  return {
  role: "user",
  content: message.content.map((part) => convertPartToLanguageModelPart(part, downloadedAssets)).filter((part) => part.type !== "text" || part.text !== ""),
- providerOptions: (_c = message.providerOptions) != null ? _c : message.experimental_providerMetadata
+ providerOptions: message.providerOptions
  };
  }
  case "assistant": {
@@ -2878,7 +2936,7 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
  return {
  role: "assistant",
  content: [{ type: "text", text: message.content }],
- providerOptions: (_d = message.providerOptions) != null ? _d : message.experimental_providerMetadata
+ providerOptions: message.providerOptions
  };
  }
  return {
@@ -2887,15 +2945,18 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
  // remove empty text parts:
  (part) => part.type !== "text" || part.text !== ""
  ).map((part) => {
- var _a18, _b2;
- const providerOptions = (_a18 = part.providerOptions) != null ? _a18 : part.experimental_providerMetadata;
+ var _a17;
+ const providerOptions = part.providerOptions;
  switch (part.type) {
  case "file": {
+ const { data, mediaType } = convertToLanguageModelV2DataContent(
+ part.data
+ );
  return {
  type: "file",
- data: part.data instanceof URL ? part.data : convertDataContentToBase64String(part.data),
+ data,
  filename: part.filename,
- mediaType: (_b2 = part.mediaType) != null ? _b2 : part.mimeType,
+ mediaType: (_a17 = mediaType != null ? mediaType : part.mediaType) != null ? _a17 : part.mimeType,
  providerOptions
  };
  }
@@ -2932,25 +2993,22 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
  }
  }
  }),
- providerOptions: (_e = message.providerOptions) != null ? _e : message.experimental_providerMetadata
+ providerOptions: message.providerOptions
  };
  }
  case "tool": {
  return {
  role: "tool",
- content: message.content.map((part) => {
- var _a18;
- return {
- type: "tool-result",
- toolCallId: part.toolCallId,
- toolName: part.toolName,
- result: part.result,
- content: part.experimental_content,
- isError: part.isError,
- providerOptions: (_a18 = part.providerOptions) != null ? _a18 : part.experimental_providerMetadata
- };
- }),
- providerOptions: (_f = message.providerOptions) != null ? _f : message.experimental_providerMetadata
+ content: message.content.map((part) => ({
+ type: "tool-result",
+ toolCallId: part.toolCallId,
+ toolName: part.toolName,
+ result: part.result,
+ content: part.experimental_content,
+ isError: part.isError,
+ providerOptions: part.providerOptions
+ })),
+ providerOptions: message.providerOptions
  };
  }
  default: {
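These message-conversion hunks drop every `experimental_providerMetadata` fallback: per-message and per-part provider metadata must now be supplied as `providerOptions`. A migration sketch; the model binding and the Anthropic option shown are illustrative, not part of this diff:

```ts
import { generateText, type LanguageModel } from 'ai';

declare const model: LanguageModel;

await generateText({
  model,
  messages: [
    {
      role: 'system',
      content: 'You are terse.',
      // previously also accepted as experimental_providerMetadata:
      providerOptions: { anthropic: { cacheControl: { type: 'ephemeral' } } },
    },
    { role: 'user', content: 'Hello' },
  ],
});
```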
@@ -2983,71 +3041,48 @@ async function downloadAssets(messages, downloadImplementation, modelSupportsIma
  );
  }
  function convertPartToLanguageModelPart(part, downloadedAssets) {
- var _a17, _b, _c, _d, _e;
+ var _a17, _b, _c;
  if (part.type === "text") {
  return {
  type: "text",
  text: part.text,
- providerOptions: (_a17 = part.providerOptions) != null ? _a17 : part.experimental_providerMetadata
+ providerOptions: part.providerOptions
  };
  }
- let mediaType = (_b = part.mediaType) != null ? _b : part.mimeType;
- let data;
- let content;
- let normalizedData;
+ let originalData;
  const type = part.type;
  switch (type) {
  case "image":
- data = part.image;
+ originalData = part.image;
  break;
  case "file":
- data = part.data;
+ originalData = part.data;
  break;
  default:
  throw new Error(`Unsupported part type: ${type}`);
  }
- try {
- content = typeof data === "string" ? new URL(data) : data;
- } catch (error) {
- content = data;
- }
- if (content instanceof URL) {
- if (content.protocol === "data:") {
- const { mediaType: dataUrlMediaType, base64Content } = splitDataUrl(
- content.toString()
- );
- if (dataUrlMediaType == null || base64Content == null) {
- throw new Error(`Invalid data URL format in part ${type}`);
- }
- mediaType = dataUrlMediaType;
- normalizedData = convertDataContentToUint8Array(base64Content);
- } else {
- const downloadedFile = downloadedAssets[content.toString()];
- if (downloadedFile) {
- normalizedData = downloadedFile.data;
- mediaType != null ? mediaType : mediaType = downloadedFile.mediaType;
- } else {
- normalizedData = content;
- }
+ const { data: convertedData, mediaType: convertedMediaType } = convertToLanguageModelV2DataContent(originalData);
+ let mediaType = (_a17 = convertedMediaType != null ? convertedMediaType : part.mediaType) != null ? _a17 : part.mimeType;
+ let data = convertedData;
+ if (data instanceof URL) {
+ const downloadedFile = downloadedAssets[data.toString()];
+ if (downloadedFile) {
+ data = downloadedFile.data;
+ mediaType = (_b = downloadedFile.mediaType) != null ? _b : mediaType;
  }
- } else {
- normalizedData = convertDataContentToUint8Array(content);
  }
  switch (type) {
  case "image": {
- if (normalizedData instanceof Uint8Array) {
- mediaType = (_c = detectMediaType({
- data: normalizedData,
- signatures: imageMediaTypeSignatures
- })) != null ? _c : mediaType;
+ if (data instanceof Uint8Array || typeof data === "string") {
+ mediaType = (_c = detectMediaType({ data, signatures: imageMediaTypeSignatures })) != null ? _c : mediaType;
  }
  return {
  type: "file",
  mediaType: mediaType != null ? mediaType : "image/*",
  // any image
  filename: void 0,
- data: normalizedData instanceof Uint8Array ? (0, import_provider_utils9.convertUint8ArrayToBase64)(normalizedData) : normalizedData,
- providerOptions: (_d = part.providerOptions) != null ? _d : part.experimental_providerMetadata
+ data,
+ providerOptions: part.providerOptions
  };
  }
  case "file": {
@@ -3058,8 +3093,8 @@ function convertPartToLanguageModelPart(part, downloadedAssets) {
  type: "file",
  mediaType,
  filename: part.filename,
- data: normalizedData instanceof Uint8Array ? convertDataContentToBase64String(normalizedData) : normalizedData,
- providerOptions: (_e = part.providerOptions) != null ? _e : part.experimental_providerMetadata
+ data,
+ providerOptions: part.providerOptions
  };
  }
  }
@@ -3067,7 +3102,7 @@ function convertPartToLanguageModelPart(part, downloadedAssets) {

  // core/prompt/prepare-call-settings.ts
  function prepareCallSettings({
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -3076,19 +3111,19 @@ function prepareCallSettings({
  stopSequences,
  seed
  }) {
- if (maxTokens != null) {
- if (!Number.isInteger(maxTokens)) {
+ if (maxOutputTokens != null) {
+ if (!Number.isInteger(maxOutputTokens)) {
  throw new InvalidArgumentError({
- parameter: "maxTokens",
- value: maxTokens,
- message: "maxTokens must be an integer"
+ parameter: "maxOutputTokens",
+ value: maxOutputTokens,
+ message: "maxOutputTokens must be an integer"
  });
  }
- if (maxTokens < 1) {
+ if (maxOutputTokens < 1) {
  throw new InvalidArgumentError({
- parameter: "maxTokens",
- value: maxTokens,
- message: "maxTokens must be >= 1"
+ parameter: "maxOutputTokens",
+ value: maxOutputTokens,
+ message: "maxOutputTokens must be >= 1"
  });
  }
  }
@@ -3147,7 +3182,7 @@ function prepareCallSettings({
  }
  }
  return {
- maxTokens,
+ maxOutputTokens,
  // TODO v5 remove default 0 for temperature
  temperature: temperature != null ? temperature : 0,
  topP,
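`prepareCallSettings` now validates `maxOutputTokens` instead of `maxTokens`, with the error messages renamed to match. Caller-side, the rename looks like this sketch:

```ts
import { generateText, type LanguageModel } from 'ai';

declare const model: LanguageModel;

await generateText({
  model,
  prompt: 'Summarize the release in one line.',
  maxOutputTokens: 256, // previously: maxTokens: 256
});

// Invalid values now raise AI_InvalidArgumentError for parameter "maxOutputTokens",
// e.g. maxOutputTokens: 0 fails with "maxOutputTokens must be >= 1".
```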
@@ -3160,8 +3195,8 @@ function prepareCallSettings({
  }

  // core/prompt/standardize-prompt.ts
- var import_provider10 = require("@ai-sdk/provider");
- var import_provider_utils10 = require("@ai-sdk/provider-utils");
+ var import_provider11 = require("@ai-sdk/provider");
+ var import_provider_utils9 = require("@ai-sdk/provider-utils");
  var import_zod7 = require("zod");

  // core/prompt/attachments-to-parts.ts
@@ -3242,12 +3277,12 @@ function attachmentsToParts(attachments) {
  }

  // core/prompt/message-conversion-error.ts
- var import_provider9 = require("@ai-sdk/provider");
+ var import_provider10 = require("@ai-sdk/provider");
  var name8 = "AI_MessageConversionError";
  var marker8 = `vercel.ai.error.${name8}`;
  var symbol8 = Symbol.for(marker8);
  var _a8;
- var MessageConversionError = class extends import_provider9.AISDKError {
+ var MessageConversionError = class extends import_provider10.AISDKError {
  constructor({
  originalMessage,
  message
@@ -3257,7 +3292,7 @@ var MessageConversionError = class extends import_provider9.AISDKError {
  this.originalMessage = originalMessage;
  }
  static isInstance(error) {
- return import_provider9.AISDKError.hasMarker(error, marker8);
+ return import_provider10.AISDKError.hasMarker(error, marker8);
  }
  };
  _a8 = symbol8;
@@ -3531,7 +3566,7 @@ function detectSingleMessageCharacteristics(message) {
  "experimental_attachments" in message)) {
  return "has-ui-specific-parts";
  } else if (typeof message === "object" && message !== null && "content" in message && (Array.isArray(message.content) || // Core messages can have array content
- "experimental_providerMetadata" in message || "providerOptions" in message)) {
+ "providerOptions" in message)) {
  return "has-core-specific-parts";
  } else if (typeof message === "object" && message !== null && "role" in message && "content" in message && typeof message.content === "string" && ["system", "user", "assistant", "tool"].includes(message.role)) {
  return "message";
@@ -3585,16 +3620,14 @@ var toolResultContentSchema = import_zod4.z.array(
  var textPartSchema = import_zod5.z.object({
  type: import_zod5.z.literal("text"),
  text: import_zod5.z.string(),
- providerOptions: providerMetadataSchema.optional(),
- experimental_providerMetadata: providerMetadataSchema.optional()
+ providerOptions: providerMetadataSchema.optional()
  });
  var imagePartSchema = import_zod5.z.object({
  type: import_zod5.z.literal("image"),
  image: import_zod5.z.union([dataContentSchema, import_zod5.z.instanceof(URL)]),
  mediaType: import_zod5.z.string().optional(),
  mimeType: import_zod5.z.string().optional(),
- providerOptions: providerMetadataSchema.optional(),
- experimental_providerMetadata: providerMetadataSchema.optional()
+ providerOptions: providerMetadataSchema.optional()
  });
  var filePartSchema = import_zod5.z.object({
  type: import_zod5.z.literal("file"),
@@ -3602,28 +3635,24 @@ var filePartSchema = import_zod5.z.object({
  filename: import_zod5.z.string().optional(),
  mediaType: import_zod5.z.string(),
  mimeType: import_zod5.z.string().optional(),
- providerOptions: providerMetadataSchema.optional(),
- experimental_providerMetadata: providerMetadataSchema.optional()
+ providerOptions: providerMetadataSchema.optional()
  });
  var reasoningPartSchema = import_zod5.z.object({
  type: import_zod5.z.literal("reasoning"),
  text: import_zod5.z.string(),
- providerOptions: providerMetadataSchema.optional(),
- experimental_providerMetadata: providerMetadataSchema.optional()
+ providerOptions: providerMetadataSchema.optional()
  });
  var redactedReasoningPartSchema = import_zod5.z.object({
  type: import_zod5.z.literal("redacted-reasoning"),
  data: import_zod5.z.string(),
- providerOptions: providerMetadataSchema.optional(),
- experimental_providerMetadata: providerMetadataSchema.optional()
+ providerOptions: providerMetadataSchema.optional()
  });
  var toolCallPartSchema = import_zod5.z.object({
  type: import_zod5.z.literal("tool-call"),
  toolCallId: import_zod5.z.string(),
  toolName: import_zod5.z.string(),
  args: import_zod5.z.unknown(),
- providerOptions: providerMetadataSchema.optional(),
- experimental_providerMetadata: providerMetadataSchema.optional()
+ providerOptions: providerMetadataSchema.optional()
  });
  var toolResultPartSchema = import_zod5.z.object({
  type: import_zod5.z.literal("tool-result"),
@@ -3632,16 +3661,14 @@ var toolResultPartSchema = import_zod5.z.object({
  result: import_zod5.z.unknown(),
  content: toolResultContentSchema.optional(),
  isError: import_zod5.z.boolean().optional(),
- providerOptions: providerMetadataSchema.optional(),
- experimental_providerMetadata: providerMetadataSchema.optional()
+ providerOptions: providerMetadataSchema.optional()
  });

  // core/prompt/message.ts
  var coreSystemMessageSchema = import_zod6.z.object({
  role: import_zod6.z.literal("system"),
  content: import_zod6.z.string(),
- providerOptions: providerMetadataSchema.optional(),
- experimental_providerMetadata: providerMetadataSchema.optional()
+ providerOptions: providerMetadataSchema.optional()
  });
  var coreUserMessageSchema = import_zod6.z.object({
  role: import_zod6.z.literal("user"),
@@ -3649,8 +3676,7 @@ var coreUserMessageSchema = import_zod6.z.object({
  import_zod6.z.string(),
  import_zod6.z.array(import_zod6.z.union([textPartSchema, imagePartSchema, filePartSchema]))
  ]),
- providerOptions: providerMetadataSchema.optional(),
- experimental_providerMetadata: providerMetadataSchema.optional()
+ providerOptions: providerMetadataSchema.optional()
  });
  var coreAssistantMessageSchema = import_zod6.z.object({
  role: import_zod6.z.literal("assistant"),
@@ -3666,14 +3692,12 @@ var coreAssistantMessageSchema = import_zod6.z.object({
  ])
  )
  ]),
- providerOptions: providerMetadataSchema.optional(),
- experimental_providerMetadata: providerMetadataSchema.optional()
+ providerOptions: providerMetadataSchema.optional()
  });
  var coreToolMessageSchema = import_zod6.z.object({
  role: import_zod6.z.literal("tool"),
  content: import_zod6.z.array(toolResultPartSchema),
- providerOptions: providerMetadataSchema.optional(),
- experimental_providerMetadata: providerMetadataSchema.optional()
+ providerOptions: providerMetadataSchema.optional()
  });
  var coreMessageSchema = import_zod6.z.union([
  coreSystemMessageSchema,
@@ -3688,26 +3712,26 @@ function standardizePrompt({
  tools
  }) {
  if (prompt.prompt == null && prompt.messages == null) {
- throw new import_provider10.InvalidPromptError({
+ throw new import_provider11.InvalidPromptError({
  prompt,
  message: "prompt or messages must be defined"
  });
  }
  if (prompt.prompt != null && prompt.messages != null) {
- throw new import_provider10.InvalidPromptError({
+ throw new import_provider11.InvalidPromptError({
  prompt,
  message: "prompt and messages cannot be defined at the same time"
  });
  }
  if (prompt.system != null && typeof prompt.system !== "string") {
- throw new import_provider10.InvalidPromptError({
+ throw new import_provider11.InvalidPromptError({
  prompt,
  message: "system must be a string"
  });
  }
  if (prompt.prompt != null) {
  if (typeof prompt.prompt !== "string") {
- throw new import_provider10.InvalidPromptError({
+ throw new import_provider11.InvalidPromptError({
  prompt,
  message: "prompt must be a string"
  });
@@ -3726,7 +3750,7 @@ function standardizePrompt({
  if (prompt.messages != null) {
  const promptType = detectPromptType(prompt.messages);
  if (promptType === "other") {
- throw new import_provider10.InvalidPromptError({
+ throw new import_provider11.InvalidPromptError({
  prompt,
  message: "messages must be an array of CoreMessage or UIMessage"
  });
@@ -3735,17 +3759,17 @@ function standardizePrompt({
  tools
  }) : prompt.messages;
  if (messages.length === 0) {
- throw new import_provider10.InvalidPromptError({
+ throw new import_provider11.InvalidPromptError({
  prompt,
  message: "messages must not be empty"
  });
  }
- const validationResult = (0, import_provider_utils10.safeValidateTypes)({
+ const validationResult = (0, import_provider_utils9.safeValidateTypes)({
  value: messages,
  schema: import_zod7.z.array(coreMessageSchema)
  });
  if (!validationResult.success) {
- throw new import_provider10.InvalidPromptError({
+ throw new import_provider11.InvalidPromptError({
  prompt,
  message: "messages must be an array of CoreMessage or UIMessage",
  cause: validationResult.error
@@ -3762,13 +3786,13 @@ function standardizePrompt({

  // core/types/usage.ts
  function calculateLanguageModelUsage2({
- promptTokens,
- completionTokens
+ inputTokens,
+ outputTokens
  }) {
  return {
- promptTokens,
- completionTokens,
- totalTokens: promptTokens + completionTokens
+ promptTokens: inputTokens != null ? inputTokens : NaN,
+ completionTokens: outputTokens != null ? outputTokens : NaN,
+ totalTokens: (inputTokens != null ? inputTokens : 0) + (outputTokens != null ? outputTokens : 0)
  };
  }
  function addLanguageModelUsage(usage1, usage2) {
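`calculateLanguageModelUsage2` now adapts the v2 model result's `inputTokens`/`outputTokens` counts (both optional) back to the stable `promptTokens`/`completionTokens` shape. A standalone restatement of that mapping:

```ts
function calculateUsage({
  inputTokens,
  outputTokens,
}: {
  inputTokens?: number;
  outputTokens?: number;
}) {
  return {
    promptTokens: inputTokens ?? NaN,      // missing counts surface as NaN
    completionTokens: outputTokens ?? NaN,
    totalTokens: (inputTokens ?? 0) + (outputTokens ?? 0), // but count as 0 in the total
  };
}

console.log(calculateUsage({ inputTokens: 10 }));
// { promptTokens: 10, completionTokens: NaN, totalTokens: 10 }
```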
@@ -3800,8 +3824,8 @@ function injectJsonInstruction({
  }

  // core/generate-object/output-strategy.ts
- var import_provider11 = require("@ai-sdk/provider");
- var import_provider_utils11 = require("@ai-sdk/provider-utils");
+ var import_provider12 = require("@ai-sdk/provider");
+ var import_provider_utils10 = require("@ai-sdk/provider-utils");

  // core/util/async-iterable-stream.ts
  function createAsyncIterableStream(source) {
@@ -3838,7 +3862,7 @@ var noSchemaOutputStrategy = {
  } : { success: true, value };
  },
  createElementStream() {
- throw new import_provider11.UnsupportedFunctionalityError({
+ throw new import_provider12.UnsupportedFunctionalityError({
  functionality: "element streams in no-schema mode"
  });
  }
@@ -3857,10 +3881,10 @@ var objectOutputStrategy = (schema) => ({
  };
  },
  validateFinalResult(value) {
- return (0, import_provider_utils11.safeValidateTypes)({ value, schema });
+ return (0, import_provider_utils10.safeValidateTypes)({ value, schema });
  },
  createElementStream() {
- throw new import_provider11.UnsupportedFunctionalityError({
+ throw new import_provider12.UnsupportedFunctionalityError({
  functionality: "element streams in object mode"
  });
  }
@@ -3883,10 +3907,10 @@ var arrayOutputStrategy = (schema) => {
  },
  validatePartialResult({ value, latestObject, isFirstDelta, isFinalDelta }) {
  var _a17;
- if (!(0, import_provider11.isJSONObject)(value) || !(0, import_provider11.isJSONArray)(value.elements)) {
+ if (!(0, import_provider12.isJSONObject)(value) || !(0, import_provider12.isJSONArray)(value.elements)) {
  return {
  success: false,
- error: new import_provider11.TypeValidationError({
+ error: new import_provider12.TypeValidationError({
  value,
  cause: "value must be an object that contains an array of elements"
  })
@@ -3896,7 +3920,7 @@ var arrayOutputStrategy = (schema) => {
  const resultArray = [];
  for (let i = 0; i < inputArray.length; i++) {
  const element = inputArray[i];
- const result = (0, import_provider_utils11.safeValidateTypes)({ value: element, schema });
+ const result = (0, import_provider_utils10.safeValidateTypes)({ value: element, schema });
  if (i === inputArray.length - 1 && !isFinalDelta) {
  continue;
  }
@@ -3926,10 +3950,10 @@ var arrayOutputStrategy = (schema) => {
  };
  },
  validateFinalResult(value) {
- if (!(0, import_provider11.isJSONObject)(value) || !(0, import_provider11.isJSONArray)(value.elements)) {
+ if (!(0, import_provider12.isJSONObject)(value) || !(0, import_provider12.isJSONArray)(value.elements)) {
  return {
  success: false,
- error: new import_provider11.TypeValidationError({
+ error: new import_provider12.TypeValidationError({
  value,
  cause: "value must be an object that contains an array of elements"
  })
@@ -3937,7 +3961,7 @@ var arrayOutputStrategy = (schema) => {
  }
  const inputArray = value.elements;
  for (const element of inputArray) {
- const result = (0, import_provider_utils11.safeValidateTypes)({ value: element, schema });
+ const result = (0, import_provider_utils10.safeValidateTypes)({ value: element, schema });
  if (!result.success) {
  return result;
  }
@@ -3992,10 +4016,10 @@ var enumOutputStrategy = (enumValues) => {
  additionalProperties: false
  },
  validateFinalResult(value) {
- if (!(0, import_provider11.isJSONObject)(value) || typeof value.result !== "string") {
+ if (!(0, import_provider12.isJSONObject)(value) || typeof value.result !== "string") {
  return {
  success: false,
- error: new import_provider11.TypeValidationError({
+ error: new import_provider12.TypeValidationError({
  value,
  cause: 'value must be an object that contains a string in the "result" property.'
  })
@@ -4004,19 +4028,19 @@ var enumOutputStrategy = (enumValues) => {
  const result = value.result;
  return enumValues.includes(result) ? { success: true, value: result } : {
  success: false,
- error: new import_provider11.TypeValidationError({
+ error: new import_provider12.TypeValidationError({
  value,
  cause: "value must be a string in the enum"
  })
  };
  },
  validatePartialResult() {
- throw new import_provider11.UnsupportedFunctionalityError({
+ throw new import_provider12.UnsupportedFunctionalityError({
  functionality: "partial results in enum mode"
  });
  },
  createElementStream() {
- throw new import_provider11.UnsupportedFunctionalityError({
+ throw new import_provider12.UnsupportedFunctionalityError({
  functionality: "element streams in enum mode"
  });
  }
@@ -4170,7 +4194,7 @@ function validateObjectGenerationInput({
  }

  // core/generate-object/generate-object.ts
- var originalGenerateId = (0, import_provider_utils12.createIdGenerator)({ prefix: "aiobj", size: 24 });
+ var originalGenerateId = (0, import_provider_utils11.createIdGenerator)({ prefix: "aiobj", size: 24 });
  async function generateObject({
  model,
  enum: enumValues,
@@ -4188,8 +4212,7 @@ async function generateObject({
  headers,
  experimental_repairText: repairText,
  experimental_telemetry: telemetry,
- experimental_providerMetadata,
- providerOptions = experimental_providerMetadata,
+ providerOptions,
  _internal: {
  generateId: generateId3 = originalGenerateId,
  currentDate = () => /* @__PURE__ */ new Date()
@@ -4243,7 +4266,7 @@ async function generateObject({
  }),
  tracer,
  fn: async (span) => {
- var _a17, _b, _c, _d;
+ var _a17, _b, _c, _d, _e;
  if (mode === "auto" || mode == null) {
  mode = model.defaultObjectGenerationMode;
  }
@@ -4296,7 +4319,7 @@ async function generateObject({
  "gen_ai.system": model.provider,
  "gen_ai.request.model": model.modelId,
  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
- "gen_ai.request.max_tokens": settings.maxTokens,
+ "gen_ai.request.max_tokens": settings.maxOutputTokens,
  "gen_ai.request.presence_penalty": settings.presencePenalty,
  "gen_ai.request.temperature": settings.temperature,
  "gen_ai.request.top_k": settings.topK,
@@ -4305,7 +4328,7 @@ async function generateObject({
  }),
  tracer,
  fn: async (span2) => {
- var _a18, _b2, _c2, _d2, _e, _f, _g, _h;
+ var _a18, _b2, _c2, _d2, _e2, _f, _g, _h;
  const result2 = await model.doGenerate({
  responseFormat: {
  type: "json",
@@ -4323,7 +4346,7 @@ async function generateObject({
  const responseData = {
  id: (_b2 = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b2 : generateId3(),
  timestamp: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
- modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+ modelId: (_f = (_e2 = result2.response) == null ? void 0 : _e2.modelId) != null ? _f : model.modelId,
  headers: (_g = result2.response) == null ? void 0 : _g.headers,
  body: (_h = result2.response) == null ? void 0 : _h.body
  };
@@ -4340,18 +4363,22 @@ async function generateObject({
  telemetry,
  attributes: {
  "ai.response.finishReason": result2.finishReason,
- "ai.response.object": { output: () => result2.text },
+ "ai.response.object": { output: () => {
+ var _a19;
+ return (_a19 = result2.text) == null ? void 0 : _a19.text;
+ } },
  "ai.response.id": responseData.id,
  "ai.response.model": responseData.modelId,
  "ai.response.timestamp": responseData.timestamp.toISOString(),
- "ai.usage.promptTokens": result2.usage.promptTokens,
- "ai.usage.completionTokens": result2.usage.completionTokens,
+ // TODO rename telemetry attributes to inputTokens and outputTokens
+ "ai.usage.promptTokens": result2.usage.inputTokens,
+ "ai.usage.completionTokens": result2.usage.outputTokens,
  // standardized gen-ai llm span attributes:
  "gen_ai.response.finish_reasons": [result2.finishReason],
  "gen_ai.response.id": responseData.id,
  "gen_ai.response.model": responseData.modelId,
- "gen_ai.usage.prompt_tokens": result2.usage.promptTokens,
- "gen_ai.usage.completion_tokens": result2.usage.completionTokens
+ "gen_ai.usage.input_tokens": result2.usage.inputTokens,
+ "gen_ai.usage.output_tokens": result2.usage.outputTokens
  }
  })
  );
@@ -4359,13 +4386,13 @@ async function generateObject({
  }
  })
  );
- result = generateResult.objectText;
+ result = (_b = generateResult.objectText) == null ? void 0 : _b.text;
  finishReason = generateResult.finishReason;
  usage = generateResult.usage;
  warnings = generateResult.warnings;
  logprobs = generateResult.logprobs;
  resultProviderMetadata = generateResult.providerMetadata;
- request = (_b = generateResult.request) != null ? _b : {};
+ request = (_c = generateResult.request) != null ? _c : {};
  response = generateResult.responseData;
  break;
  }
@@ -4377,7 +4404,7 @@ async function generateObject({
  const promptMessages = await convertToLanguageModelPrompt({
  prompt: standardizedPrompt,
  modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: (_c = model.supportsUrl) == null ? void 0 : _c.bind(model)
+ modelSupportsUrl: (_d = model.supportsUrl) == null ? void 0 : _d.bind(model)
  // support 'this' context,
  });
  const inputFormat = standardizedPrompt.type;
@@ -4403,7 +4430,7 @@ async function generateObject({
  "gen_ai.system": model.provider,
  "gen_ai.request.model": model.modelId,
  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
- "gen_ai.request.max_tokens": settings.maxTokens,
+ "gen_ai.request.max_tokens": settings.maxOutputTokens,
  "gen_ai.request.presence_penalty": settings.presencePenalty,
  "gen_ai.request.temperature": settings.temperature,
  "gen_ai.request.top_k": settings.topK,
@@ -4412,7 +4439,7 @@ async function generateObject({
  }),
  tracer,
  fn: async (span2) => {
- var _a18, _b2, _c2, _d2, _e, _f, _g, _h, _i, _j;
+ var _a18, _b2, _c2, _d2, _e2, _f, _g, _h, _i, _j;
  const result2 = await model.doGenerate({
  tools: [
  {
@@ -4433,7 +4460,7 @@ async function generateObject({
  const objectText = (_b2 = (_a18 = result2.toolCalls) == null ? void 0 : _a18[0]) == null ? void 0 : _b2.args;
  const responseData = {
  id: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.id) != null ? _d2 : generateId3(),
- timestamp: (_f = (_e = result2.response) == null ? void 0 : _e.timestamp) != null ? _f : currentDate(),
+ timestamp: (_f = (_e2 = result2.response) == null ? void 0 : _e2.timestamp) != null ? _f : currentDate(),
  modelId: (_h = (_g = result2.response) == null ? void 0 : _g.modelId) != null ? _h : model.modelId,
  headers: (_i = result2.response) == null ? void 0 : _i.headers,
  body: (_j = result2.response) == null ? void 0 : _j.body
@@ -4455,14 +4482,15 @@ async function generateObject({
  "ai.response.id": responseData.id,
  "ai.response.model": responseData.modelId,
  "ai.response.timestamp": responseData.timestamp.toISOString(),
- "ai.usage.promptTokens": result2.usage.promptTokens,
- "ai.usage.completionTokens": result2.usage.completionTokens,
+ // TODO rename telemetry attributes to inputTokens and outputTokens
+ "ai.usage.promptTokens": result2.usage.inputTokens,
+ "ai.usage.completionTokens": result2.usage.outputTokens,
  // standardized gen-ai llm span attributes:
  "gen_ai.response.finish_reasons": [result2.finishReason],
  "gen_ai.response.id": responseData.id,
  "gen_ai.response.model": responseData.modelId,
- "gen_ai.usage.input_tokens": result2.usage.promptTokens,
- "gen_ai.usage.output_tokens": result2.usage.completionTokens
+ "gen_ai.usage.input_tokens": result2.usage.inputTokens,
+ "gen_ai.usage.output_tokens": result2.usage.outputTokens
  }
  })
  );
@@ -4476,7 +4504,7 @@ async function generateObject({
  warnings = generateResult.warnings;
  logprobs = generateResult.logprobs;
  resultProviderMetadata = generateResult.providerMetadata;
- request = (_d = generateResult.request) != null ? _d : {};
+ request = (_e = generateResult.request) != null ? _e : {};
  response = generateResult.responseData;
  break;
  }
@@ -4491,7 +4519,7 @@ async function generateObject({
  }
  }
  function processResult(result2) {
- const parseResult = (0, import_provider_utils12.safeParseJSON)({ text: result2 });
+ const parseResult = (0, import_provider_utils11.safeParseJSON)({ text: result2 });
  if (!parseResult.success) {
  throw new NoObjectGeneratedError({
  message: "No object generated: could not parse the response.",
@@ -4526,7 +4554,7 @@ async function generateObject({
  try {
  object2 = processResult(result);
  } catch (error) {
- if (repairText != null && NoObjectGeneratedError.isInstance(error) && (import_provider12.JSONParseError.isInstance(error.cause) || import_provider12.TypeValidationError.isInstance(error.cause))) {
+ if (repairText != null && NoObjectGeneratedError.isInstance(error) && (import_provider13.JSONParseError.isInstance(error.cause) || import_provider13.TypeValidationError.isInstance(error.cause))) {
  const repairedText = await repairText({
  text: result,
  error: error.cause
@@ -4547,8 +4575,9 @@ async function generateObject({
  "ai.response.object": {
  output: () => JSON.stringify(object2)
  },
- "ai.usage.promptTokens": usage.promptTokens,
- "ai.usage.completionTokens": usage.completionTokens
+ // TODO rename telemetry attributes to inputTokens and outputTokens
+ "ai.usage.promptTokens": usage.inputTokens,
+ "ai.usage.completionTokens": usage.outputTokens
  }
  })
  );
@@ -4572,7 +4601,6 @@ var DefaultGenerateObjectResult = class {
  this.usage = options.usage;
  this.warnings = options.warnings;
  this.providerMetadata = options.providerMetadata;
- this.experimental_providerMetadata = options.providerMetadata;
  this.response = options.response;
  this.request = options.request;
  this.logprobs = options.logprobs;
@@ -4589,7 +4617,7 @@ var DefaultGenerateObjectResult = class {
  };

  // core/generate-object/stream-object.ts
- var import_provider_utils13 = require("@ai-sdk/provider-utils");
+ var import_provider_utils12 = require("@ai-sdk/provider-utils");

  // util/delayed-promise.ts
  var DelayedPromise = class {
@@ -4733,7 +4761,7 @@ function now() {
  }

  // core/generate-object/stream-object.ts
- var originalGenerateId2 = (0, import_provider_utils13.createIdGenerator)({ prefix: "aiobj", size: 24 });
+ var originalGenerateId2 = (0, import_provider_utils12.createIdGenerator)({ prefix: "aiobj", size: 24 });
  function streamObject({
  model,
  schema: inputSchema,
@@ -4748,8 +4776,7 @@ function streamObject({
  abortSignal,
  headers,
  experimental_telemetry: telemetry,
- experimental_providerMetadata,
- providerOptions = experimental_providerMetadata,
+ providerOptions,
  onError,
  onFinish,
  _internal: {
@@ -4906,8 +4933,8 @@ var DefaultStreamObjectResult = class {
  transformer = {
  transform: (chunk, controller) => {
  switch (chunk.type) {
- case "text-delta":
- controller.enqueue(chunk.textDelta);
+ case "text":
+ controller.enqueue(chunk.text);
  break;
  case "response-metadata":
  case "finish":
@@ -4998,7 +5025,7 @@ var DefaultStreamObjectResult = class {
  "gen_ai.system": model.provider,
  "gen_ai.request.model": model.modelId,
  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
- "gen_ai.request.max_tokens": settings.maxTokens,
+ "gen_ai.request.max_tokens": settings.maxOutputTokens,
  "gen_ai.request.presence_penalty": settings.presencePenalty,
  "gen_ai.request.temperature": settings.temperature,
  "gen_ai.request.top_k": settings.topK,
@@ -5186,8 +5213,7 @@ var DefaultStreamObjectResult = class {
  headers: response == null ? void 0 : response.headers
  },
  warnings,
- providerMetadata,
- experimental_providerMetadata: providerMetadata
+ providerMetadata
  }));
  } catch (error2) {
  controller.enqueue({ type: "error", error: error2 });
@@ -5219,9 +5245,6 @@ var DefaultStreamObjectResult = class {
  get usage() {
  return this.usagePromise.value;
  }
- get experimental_providerMetadata() {
- return this.providerMetadataPromise.value;
- }
  get providerMetadata() {
  return this.providerMetadataPromise.value;
  }
@@ -5309,39 +5332,39 @@ var DefaultStreamObjectResult = class {
  };

  // core/generate-text/generate-text.ts
- var import_provider_utils15 = require("@ai-sdk/provider-utils");
+ var import_provider_utils14 = require("@ai-sdk/provider-utils");

  // errors/no-output-specified-error.ts
- var import_provider13 = require("@ai-sdk/provider");
+ var import_provider14 = require("@ai-sdk/provider");
  var name9 = "AI_NoOutputSpecifiedError";
  var marker9 = `vercel.ai.error.${name9}`;
  var symbol9 = Symbol.for(marker9);
  var _a9;
- var NoOutputSpecifiedError = class extends import_provider13.AISDKError {
+ var NoOutputSpecifiedError = class extends import_provider14.AISDKError {
  // used in isInstance
  constructor({ message = "No output specified." } = {}) {
  super({ name: name9, message });
  this[_a9] = true;
  }
  static isInstance(error) {
- return import_provider13.AISDKError.hasMarker(error, marker9);
+ return import_provider14.AISDKError.hasMarker(error, marker9);
  }
  };
  _a9 = symbol9;

  // errors/tool-execution-error.ts
- var import_provider14 = require("@ai-sdk/provider");
+ var import_provider15 = require("@ai-sdk/provider");
  var name10 = "AI_ToolExecutionError";
  var marker10 = `vercel.ai.error.${name10}`;
  var symbol10 = Symbol.for(marker10);
  var _a10;
- var ToolExecutionError = class extends import_provider14.AISDKError {
+ var ToolExecutionError = class extends import_provider15.AISDKError {
  constructor({
  toolArgs,
  toolName,
  toolCallId,
  cause,
- message = `Error executing tool ${toolName}: ${(0, import_provider14.getErrorMessage)(cause)}`
+ message = `Error executing tool ${toolName}: ${(0, import_provider15.getErrorMessage)(cause)}`
  }) {
  super({ name: name10, message, cause });
  this[_a10] = true;
@@ -5350,7 +5373,7 @@ var ToolExecutionError = class extends import_provider14.AISDKError {
  this.toolArgs = toolArgs;
  this.toolCallId = toolCallId;
  }
  static isInstance(error) {
- return import_provider14.AISDKError.hasMarker(error, marker10);
+ return import_provider15.AISDKError.hasMarker(error, marker10);
  }
  };
  _a10 = symbol10;
@@ -5418,20 +5441,20 @@ function removeTextAfterLastWhitespace(text2) {
  }

  // core/generate-text/parse-tool-call.ts
- var import_provider_utils14 = require("@ai-sdk/provider-utils");
+ var import_provider_utils13 = require("@ai-sdk/provider-utils");

  // errors/invalid-tool-arguments-error.ts
- var import_provider15 = require("@ai-sdk/provider");
+ var import_provider16 = require("@ai-sdk/provider");
  var name11 = "AI_InvalidToolArgumentsError";
  var marker11 = `vercel.ai.error.${name11}`;
  var symbol11 = Symbol.for(marker11);
  var _a11;
- var InvalidToolArgumentsError = class extends import_provider15.AISDKError {
+ var InvalidToolArgumentsError = class extends import_provider16.AISDKError {
  constructor({
  toolArgs,
  toolName,
  cause,
- message = `Invalid arguments for tool ${toolName}: ${(0, import_provider15.getErrorMessage)(
+ message = `Invalid arguments for tool ${toolName}: ${(0, import_provider16.getErrorMessage)(
  cause
  )}`
  }) {
@@ -5441,18 +5464,18 @@ var InvalidToolArgumentsError = class extends import_provider15.AISDKError {
  this.toolName = toolName;
  }
  static isInstance(error) {
- return import_provider15.AISDKError.hasMarker(error, marker11);
+ return import_provider16.AISDKError.hasMarker(error, marker11);
  }
  };
  _a11 = symbol11;

  // errors/no-such-tool-error.ts
- var import_provider16 = require("@ai-sdk/provider");
+ var import_provider17 = require("@ai-sdk/provider");
  var name12 = "AI_NoSuchToolError";
  var marker12 = `vercel.ai.error.${name12}`;
  var symbol12 = Symbol.for(marker12);
  var _a12;
- var NoSuchToolError = class extends import_provider16.AISDKError {
+ var NoSuchToolError = class extends import_provider17.AISDKError {
  constructor({
  toolName,
  availableTools = void 0,
@@ -5464,29 +5487,29 @@ var NoSuchToolError = class extends import_provider16.AISDKError {
  this.availableTools = availableTools;
  }
  static isInstance(error) {
- return import_provider16.AISDKError.hasMarker(error, marker12);
+ return import_provider17.AISDKError.hasMarker(error, marker12);
  }
  };
  _a12 = symbol12;

  // errors/tool-call-repair-error.ts
5473
- var import_provider17 = require("@ai-sdk/provider");
5496
+ var import_provider18 = require("@ai-sdk/provider");
5474
5497
  var name13 = "AI_ToolCallRepairError";
5475
5498
  var marker13 = `vercel.ai.error.${name13}`;
5476
5499
  var symbol13 = Symbol.for(marker13);
5477
5500
  var _a13;
5478
- var ToolCallRepairError = class extends import_provider17.AISDKError {
5501
+ var ToolCallRepairError = class extends import_provider18.AISDKError {
5479
5502
  constructor({
5480
5503
  cause,
5481
5504
  originalError,
5482
- message = `Error repairing tool call: ${(0, import_provider17.getErrorMessage)(cause)}`
5505
+ message = `Error repairing tool call: ${(0, import_provider18.getErrorMessage)(cause)}`
5483
5506
  }) {
5484
5507
  super({ name: name13, message, cause });
5485
5508
  this[_a13] = true;
5486
5509
  this.originalError = originalError;
5487
5510
  }
5488
5511
  static isInstance(error) {
5489
- return import_provider17.AISDKError.hasMarker(error, marker13);
5512
+ return import_provider18.AISDKError.hasMarker(error, marker13);
5490
5513
  }
5491
5514
  };
5492
5515
  _a13 = symbol13;
@@ -5513,7 +5536,10 @@ async function parseToolCall({
5513
5536
  repairedToolCall = await repairToolCall({
5514
5537
  toolCall,
5515
5538
  tools,
5516
- parameterSchema: ({ toolName }) => asSchema(tools[toolName].parameters).jsonSchema,
5539
+ parameterSchema: ({ toolName }) => {
5540
+ const { parameters } = tools[toolName];
5541
+ return asSchema(parameters).jsonSchema;
5542
+ },
5517
5543
  system,
5518
5544
  messages,
5519
5545
  error
@@ -5543,7 +5569,7 @@ async function doParseToolCall({
5543
5569
  });
5544
5570
  }
5545
5571
  const schema = asSchema(tool2.parameters);
5546
- const parseResult = toolCall.args.trim() === "" ? (0, import_provider_utils14.safeValidateTypes)({ value: {}, schema }) : (0, import_provider_utils14.safeParseJSON)({ text: toolCall.args, schema });
5572
+ const parseResult = toolCall.args.trim() === "" ? (0, import_provider_utils13.safeValidateTypes)({ value: {}, schema }) : (0, import_provider_utils13.safeParseJSON)({ text: toolCall.args, schema });
5547
5573
  if (parseResult.success === false) {
5548
5574
  throw new InvalidToolArgumentsError({
5549
5575
  toolName,
@@ -5555,7 +5581,7 @@ async function doParseToolCall({
5555
5581
  type: "tool-call",
5556
5582
  toolCallId: toolCall.toolCallId,
5557
5583
  toolName,
5558
- args: parseResult.value
5584
+ args: parseResult == null ? void 0 : parseResult.value
5559
5585
  };
5560
5586
  }
5561
5587
 
@@ -5577,23 +5603,36 @@ function toResponseMessages({
5577
5603
  generateMessageId
5578
5604
  }) {
5579
5605
  const responseMessages = [];
5580
- responseMessages.push({
5581
- role: "assistant",
5582
- content: [
5606
+ const content = [];
5607
+ if (reasoning.length > 0) {
5608
+ content.push(
5583
5609
  ...reasoning.map(
5584
5610
  (part) => part.type === "text" ? { ...part, type: "reasoning" } : { ...part, type: "redacted-reasoning" }
5585
- ),
5586
- // TODO language model v2: switch to order response content (instead of type-based ordering)
5611
+ )
5612
+ );
5613
+ }
5614
+ if (files.length > 0) {
5615
+ content.push(
5587
5616
  ...files.map((file) => ({
5588
5617
  type: "file",
5589
5618
  data: file.base64,
5590
5619
  mediaType: file.mediaType
5591
- })),
5592
- { type: "text", text: text2 },
5593
- ...toolCalls
5594
- ],
5595
- id: messageId
5596
- });
5620
+ }))
5621
+ );
5622
+ }
5623
+ if (text2.length > 0) {
5624
+ content.push({ type: "text", text: text2 });
5625
+ }
5626
+ if (toolCalls.length > 0) {
5627
+ content.push(...toolCalls);
5628
+ }
5629
+ if (content.length > 0) {
5630
+ responseMessages.push({
5631
+ role: "assistant",
5632
+ content,
5633
+ id: messageId
5634
+ });
5635
+ }
5597
5636
  if (toolResults.length > 0) {
5598
5637
  responseMessages.push({
5599
5638
  role: "tool",
@@ -5621,11 +5660,11 @@ function toResponseMessages({
  }

  // core/generate-text/generate-text.ts
- var originalGenerateId3 = (0, import_provider_utils15.createIdGenerator)({
+ var originalGenerateId3 = (0, import_provider_utils14.createIdGenerator)({
  prefix: "aitxt",
  size: 24
  });
- var originalGenerateMessageId = (0, import_provider_utils15.createIdGenerator)({
+ var originalGenerateMessageId = (0, import_provider_utils14.createIdGenerator)({
  prefix: "msg",
  size: 24
  });
@@ -5644,8 +5683,7 @@ async function generateText({
  experimental_output: output,
  experimental_continueSteps: continueSteps = false,
  experimental_telemetry: telemetry,
- experimental_providerMetadata,
- providerOptions = experimental_providerMetadata,
+ providerOptions,
  experimental_activeTools: activeTools,
  experimental_repairToolCall: repairToolCall,
  _internal: {
@@ -5698,7 +5736,7 @@ async function generateText({
  }),
  tracer,
  fn: async (span) => {
- var _a18, _b, _c, _d, _e, _f, _g;
+ var _a18, _b, _c, _d, _e, _f, _g, _h;
  const toolsAndToolChoice = {
  ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
  };
@@ -5763,7 +5801,7 @@ async function generateText({
  "gen_ai.system": model.provider,
  "gen_ai.request.model": model.modelId,
  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
- "gen_ai.request.max_tokens": settings.maxTokens,
+ "gen_ai.request.max_tokens": settings.maxOutputTokens,
  "gen_ai.request.presence_penalty": settings.presencePenalty,
  "gen_ai.request.stop_sequences": settings.stopSequences,
  "gen_ai.request.temperature": settings.temperature,
@@ -5773,7 +5811,7 @@ async function generateText({
  }),
  tracer,
  fn: async (span2) => {
- var _a19, _b2, _c2, _d2, _e2, _f2, _g2, _h;
+ var _a19, _b2, _c2, _d2, _e2, _f2, _g2, _h2;
  const result = await model.doGenerate({
  ...callSettings,
  ...toolsAndToolChoice,
@@ -5789,7 +5827,7 @@ async function generateText({
  timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
  modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : model.modelId,
  headers: (_g2 = result.response) == null ? void 0 : _g2.headers,
- body: (_h = result.response) == null ? void 0 : _h.body
+ body: (_h2 = result.response) == null ? void 0 : _h2.body
  };
  span2.setAttributes(
  selectTelemetryAttributes({
@@ -5797,22 +5835,36 @@ async function generateText({
  attributes: {
  "ai.response.finishReason": result.finishReason,
  "ai.response.text": {
- output: () => result.text
+ output: () => {
+ var _a20;
+ return (_a20 = result.text) == null ? void 0 : _a20.text;
+ }
  },
  "ai.response.toolCalls": {
- output: () => JSON.stringify(result.toolCalls)
+ output: () => {
+ var _a20;
+ return JSON.stringify(
+ (_a20 = result.toolCalls) == null ? void 0 : _a20.map((toolCall) => ({
+ toolCallType: toolCall.toolCallType,
+ toolCallId: toolCall.toolCallId,
+ toolName: toolCall.toolName,
+ args: toolCall.args
+ }))
+ );
+ }
  },
  "ai.response.id": responseData.id,
  "ai.response.model": responseData.modelId,
  "ai.response.timestamp": responseData.timestamp.toISOString(),
- "ai.usage.promptTokens": result.usage.promptTokens,
- "ai.usage.completionTokens": result.usage.completionTokens,
+ // TODO rename telemetry attributes to inputTokens and outputTokens
+ "ai.usage.promptTokens": result.usage.inputTokens,
+ "ai.usage.completionTokens": result.usage.outputTokens,
  // standardized gen-ai llm span attributes:
  "gen_ai.response.finish_reasons": [result.finishReason],
  "gen_ai.response.id": responseData.id,
  "gen_ai.response.model": responseData.modelId,
- "gen_ai.usage.input_tokens": result.usage.promptTokens,
- "gen_ai.usage.output_tokens": result.usage.completionTokens
+ "gen_ai.usage.input_tokens": result.usage.inputTokens,
+ "gen_ai.usage.output_tokens": result.usage.outputTokens
  }
  })
  );
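
The telemetry attribute names are unchanged in the hunk above; only their source moves to the v2 usage shape that this diff assumes ({ inputTokens, outputTokens } instead of { promptTokens, completionTokens }). A sketch of the mapping:

// v1 usage (before): { promptTokens: number, completionTokens: number }
// v2 usage (after):  { inputTokens?: number, outputTokens?: number }
// span attributes keep their old names for now (see the TODO in the diff):
//   "ai.usage.promptTokens"      <- result.usage.inputTokens
//   "ai.usage.completionTokens"  <- result.usage.outputTokens
//   "gen_ai.usage.input_tokens"  <- result.usage.inputTokens
//   "gen_ai.usage.output_tokens" <- result.usage.outputTokens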
@@ -5856,7 +5908,7 @@ async function generateText({
  nextStepType = "tool-result";
  }
  }
- const originalText = (_c = currentModelResponse.text) != null ? _c : "";
+ const originalText = (_d = (_c = currentModelResponse.text) == null ? void 0 : _c.text) != null ? _d : "";
  const stepTextLeadingWhitespaceTrimmed = stepType === "continue" && // only for continue steps
  text2.trimEnd() !== text2 ? originalText.trimStart() : originalText;
  const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace(stepTextLeadingWhitespaceTrimmed) : stepTextLeadingWhitespaceTrimmed;
@@ -5864,7 +5916,7 @@ async function generateText({
  currentReasoningDetails = asReasoningDetails(
  currentModelResponse.reasoning
  );
- sources.push(...(_d = currentModelResponse.sources) != null ? _d : []);
+ sources.push(...(_e = currentModelResponse.sources) != null ? _e : []);
  if (stepType === "continue") {
  const lastMessage = responseMessages[responseMessages.length - 1];
  if (typeof lastMessage.content === "string") {
@@ -5896,21 +5948,20 @@ async function generateText({
  reasoning: asReasoningText(currentReasoningDetails),
  reasoningDetails: currentReasoningDetails,
  files: asFiles(currentModelResponse.files),
- sources: (_e = currentModelResponse.sources) != null ? _e : [],
+ sources: (_f = currentModelResponse.sources) != null ? _f : [],
  toolCalls: currentToolCalls,
  toolResults: currentToolResults,
  finishReason: currentModelResponse.finishReason,
  usage: currentUsage,
  warnings: currentModelResponse.warnings,
  logprobs: currentModelResponse.logprobs,
- request: (_f = currentModelResponse.request) != null ? _f : {},
+ request: (_g = currentModelResponse.request) != null ? _g : {},
  response: {
  ...currentModelResponse.response,
  // deep clone msgs to avoid mutating past messages in multi-step:
  messages: structuredClone(responseMessages)
  },
  providerMetadata: currentModelResponse.providerMetadata,
- experimental_providerMetadata: currentModelResponse.providerMetadata,
  isContinued: nextStepType === "continue"
  };
  steps.push(currentStepResult);
@@ -5923,13 +5974,27 @@ async function generateText({
  attributes: {
  "ai.response.finishReason": currentModelResponse.finishReason,
  "ai.response.text": {
- output: () => currentModelResponse.text
+ output: () => {
+ var _a19;
+ return (_a19 = currentModelResponse.text) == null ? void 0 : _a19.text;
+ }
  },
  "ai.response.toolCalls": {
- output: () => JSON.stringify(currentModelResponse.toolCalls)
+ output: () => {
+ var _a19;
+ return JSON.stringify(
+ (_a19 = currentModelResponse.toolCalls) == null ? void 0 : _a19.map((toolCall) => ({
+ toolCallType: toolCall.toolCallType,
+ toolCallId: toolCall.toolCallId,
+ toolName: toolCall.toolName,
+ args: toolCall.args
+ }))
+ );
+ }
  },
- "ai.usage.promptTokens": currentModelResponse.usage.promptTokens,
- "ai.usage.completionTokens": currentModelResponse.usage.completionTokens
+ // TODO rename telemetry attributes to inputTokens and outputTokens
+ "ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
+ "ai.usage.completionTokens": currentModelResponse.usage.outputTokens
  }
  })
  );
@@ -5957,7 +6022,7 @@ async function generateText({
  finishReason: currentModelResponse.finishReason,
  usage,
  warnings: currentModelResponse.warnings,
- request: (_g = currentModelResponse.request) != null ? _g : {},
+ request: (_h = currentModelResponse.request) != null ? _h : {},
  response: {
  ...currentModelResponse.response,
  messages: responseMessages
@@ -6058,7 +6123,6 @@ var DefaultGenerateTextResult = class {
  this.request = options.request;
  this.response = options.response;
  this.steps = options.steps;
- this.experimental_providerMetadata = options.providerMetadata;
  this.providerMetadata = options.providerMetadata;
  this.logprobs = options.logprobs;
  this.outputResolver = options.outputResolver;
@@ -6072,10 +6136,28 @@ function asReasoningDetails(reasoning) {
  if (reasoning == null) {
  return [];
  }
- if (typeof reasoning === "string") {
- return [{ type: "text", text: reasoning }];
+ const result = [];
+ let activeReasoningText;
+ for (const part of reasoning) {
+ if (part.reasoningType === "text") {
+ if (activeReasoningText == null) {
+ activeReasoningText = { type: "text", text: part.text };
+ result.push(activeReasoningText);
+ } else {
+ activeReasoningText.text += part.text;
+ }
+ } else if (part.reasoningType === "signature") {
+ if (activeReasoningText == null) {
+ activeReasoningText = { type: "text", text: "" };
+ result.push(activeReasoningText);
+ }
+ activeReasoningText.signature = part.signature;
+ activeReasoningText = void 0;
+ } else if (part.reasoningType === "redacted") {
+ result.push({ type: "redacted", data: part.data });
+ }
  }
- return reasoning;
+ return result;
  }
  function asFiles(files) {
  var _a17;
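
asReasoningDetails now takes an array of v2 reasoning parts keyed by reasoningType and folds consecutive text and signature parts into a single detail. An illustrative input/output pair (values invented):

// input parts (shape per this diff):
//   [{ reasoningType: "text", text: "step 1" },
//    { reasoningType: "signature", signature: "sig" },
//    { reasoningType: "redacted", data: "..." }]
// output details:
//   [{ type: "text", text: "step 1", signature: "sig" },
//    { type: "redacted", data: "..." }]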
@@ -6088,18 +6170,18 @@ __export(output_exports, {
  object: () => object,
  text: () => text
  });
- var import_provider_utils16 = require("@ai-sdk/provider-utils");
+ var import_provider_utils15 = require("@ai-sdk/provider-utils");

  // errors/index.ts
- var import_provider20 = require("@ai-sdk/provider");
+ var import_provider21 = require("@ai-sdk/provider");

  // errors/invalid-stream-part-error.ts
- var import_provider18 = require("@ai-sdk/provider");
+ var import_provider19 = require("@ai-sdk/provider");
  var name14 = "AI_InvalidStreamPartError";
  var marker14 = `vercel.ai.error.${name14}`;
  var symbol14 = Symbol.for(marker14);
  var _a14;
- var InvalidStreamPartError = class extends import_provider18.AISDKError {
+ var InvalidStreamPartError = class extends import_provider19.AISDKError {
  constructor({
  chunk,
  message
@@ -6109,18 +6191,18 @@ var InvalidStreamPartError = class extends import_provider18.AISDKError {
  this.chunk = chunk;
  }
  static isInstance(error) {
- return import_provider18.AISDKError.hasMarker(error, marker14);
+ return import_provider19.AISDKError.hasMarker(error, marker14);
  }
  };
  _a14 = symbol14;

  // errors/mcp-client-error.ts
- var import_provider19 = require("@ai-sdk/provider");
+ var import_provider20 = require("@ai-sdk/provider");
  var name15 = "AI_MCPClientError";
  var marker15 = `vercel.ai.error.${name15}`;
  var symbol15 = Symbol.for(marker15);
  var _a15;
- var MCPClientError = class extends import_provider19.AISDKError {
+ var MCPClientError = class extends import_provider20.AISDKError {
  constructor({
  name: name17 = "MCPClientError",
  message,
@@ -6130,7 +6212,7 @@ var MCPClientError = class extends import_provider19.AISDKError {
  this[_a15] = true;
  }
  static isInstance(error) {
- return import_provider19.AISDKError.hasMarker(error, marker15);
+ return import_provider20.AISDKError.hasMarker(error, marker15);
  }
  };
  _a15 = symbol15;
@@ -6184,7 +6266,7 @@ var object = ({
  }
  },
  parseOutput({ text: text2 }, context) {
- const parseResult = (0, import_provider_utils16.safeParseJSON)({ text: text2 });
+ const parseResult = (0, import_provider_utils15.safeParseJSON)({ text: text2 });
  if (!parseResult.success) {
  throw new NoObjectGeneratedError({
  message: "No object generated: could not parse the response.",
@@ -6195,7 +6277,7 @@ var object = ({
  finishReason: context.finishReason
  });
  }
- const validationResult = (0, import_provider_utils16.safeValidateTypes)({
+ const validationResult = (0, import_provider_utils15.safeValidateTypes)({
  value: parseResult.value,
  schema
  });
@@ -6215,8 +6297,8 @@ var object = ({
  };

  // core/generate-text/smooth-stream.ts
- var import_provider_utils17 = require("@ai-sdk/provider-utils");
- var import_provider21 = require("@ai-sdk/provider");
+ var import_provider_utils16 = require("@ai-sdk/provider-utils");
+ var import_provider22 = require("@ai-sdk/provider");
  var CHUNKING_REGEXPS = {
  word: /\S+\s+/m,
  line: /\n+/m
@@ -6224,7 +6306,7 @@ var CHUNKING_REGEXPS = {
  function smoothStream({
  delayInMs = 10,
  chunking = "word",
- _internal: { delay: delay2 = import_provider_utils17.delay } = {}
+ _internal: { delay: delay2 = import_provider_utils16.delay } = {}
  } = {}) {
  let detectChunk;
  if (typeof chunking === "function") {
@@ -6246,7 +6328,7 @@ function smoothStream({
  } else {
  const chunkingRegex = typeof chunking === "string" ? CHUNKING_REGEXPS[chunking] : chunking;
  if (chunkingRegex == null) {
- throw new import_provider21.InvalidArgumentError({
+ throw new import_provider22.InvalidArgumentError({
  argument: "chunking",
  message: `Chunking must be "word" or "line" or a RegExp. Received: ${chunking}`
  });
@@ -6263,18 +6345,18 @@ function smoothStream({
  let buffer = "";
  return new TransformStream({
  async transform(chunk, controller) {
- if (chunk.type !== "text-delta") {
+ if (chunk.type !== "text") {
  if (buffer.length > 0) {
- controller.enqueue({ type: "text-delta", textDelta: buffer });
+ controller.enqueue({ type: "text", text: buffer });
  buffer = "";
  }
  controller.enqueue(chunk);
  return;
  }
- buffer += chunk.textDelta;
+ buffer += chunk.text;
  let match;
  while ((match = detectChunk(buffer)) != null) {
- controller.enqueue({ type: "text-delta", textDelta: match });
+ controller.enqueue({ type: "text", text: match });
  buffer = buffer.slice(match.length);
  await delay2(delayInMs);
  }
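
Throughout this release, stream parts are renamed from { type: "text-delta", textDelta } to { type: "text", text }, as the smoothStream transform above shows. Consumers that pattern-match on fullStream need the same rename; a minimal sketch (result is assumed to come from streamText):

for await (const part of result.fullStream) {
  if (part.type === "text") {        // was: part.type === "text-delta"
    process.stdout.write(part.text); // was: part.textDelta
  }
}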
@@ -6284,8 +6366,8 @@ function smoothStream({
  }

  // core/generate-text/stream-text.ts
- var import_provider22 = require("@ai-sdk/provider");
- var import_provider_utils18 = require("@ai-sdk/provider-utils");
+ var import_provider23 = require("@ai-sdk/provider");
+ var import_provider_utils17 = require("@ai-sdk/provider-utils");

  // util/as-array.ts
  function asArray(value) {
@@ -6433,10 +6515,8 @@ function runToolsTransformation({
  async transform(chunk, controller) {
  const chunkType = chunk.type;
  switch (chunkType) {
- case "text-delta":
+ case "text":
  case "reasoning":
- case "reasoning-signature":
- case "redacted-reasoning":
  case "source":
  case "response-metadata":
  case "error": {
@@ -6444,12 +6524,13 @@ function runToolsTransformation({
  break;
  }
  case "file": {
- controller.enqueue(
- new DefaultGeneratedFileWithType({
+ controller.enqueue({
+ type: "file",
+ file: new DefaultGeneratedFileWithType({
  data: chunk.data,
  mediaType: chunk.mediaType
  })
- );
+ });
  break;
  }
  case "tool-call-delta": {
@@ -6559,7 +6640,7 @@ function runToolsTransformation({
  finishReason: chunk.finishReason,
  logprobs: chunk.logprobs,
  usage: calculateLanguageModelUsage2(chunk.usage),
- experimental_providerMetadata: chunk.providerMetadata
+ providerMetadata: chunk.providerMetadata
  };
  break;
  }
@@ -6602,11 +6683,11 @@ function runToolsTransformation({
  }

  // core/generate-text/stream-text.ts
- var originalGenerateId4 = (0, import_provider_utils18.createIdGenerator)({
+ var originalGenerateId4 = (0, import_provider_utils17.createIdGenerator)({
  prefix: "aitxt",
  size: 24
  });
- var originalGenerateMessageId2 = (0, import_provider_utils18.createIdGenerator)({
+ var originalGenerateMessageId2 = (0, import_provider_utils17.createIdGenerator)({
  prefix: "msg",
  size: 24
  });
@@ -6625,8 +6706,7 @@ function streamText({
  experimental_output: output,
  experimental_continueSteps: continueSteps = false,
  experimental_telemetry: telemetry,
- experimental_providerMetadata,
- providerOptions = experimental_providerMetadata,
+ providerOptions,
  experimental_toolCallStreaming = false,
  toolCallStreaming = experimental_toolCallStreaming,
  experimental_activeTools: activeTools,
@@ -6689,7 +6769,7 @@ function createOutputTransformStream(output) {
  partialOutput = void 0
  }) {
  controller.enqueue({
- part: { type: "text-delta", textDelta: textChunk },
+ part: { type: "text", text: textChunk },
  partialOutput
  });
  textChunk = "";
@@ -6699,12 +6779,12 @@ function createOutputTransformStream(output) {
  if (chunk.type === "step-finish") {
  publishTextChunk({ controller });
  }
- if (chunk.type !== "text-delta") {
+ if (chunk.type !== "text") {
  controller.enqueue({ part: chunk, partialOutput: void 0 });
  return;
  }
- text2 += chunk.textDelta;
- textChunk += chunk.textDelta;
+ text2 += chunk.text;
+ textChunk += chunk.text;
  const result = output.parsePartial({ text: text2 });
  if (result != null) {
  const currentJson = JSON.stringify(result.partial);
@@ -6799,44 +6879,44 @@ var DefaultStreamTextResult = class {
  async transform(chunk, controller) {
  controller.enqueue(chunk);
  const { part } = chunk;
- if (part.type === "text-delta" || part.type === "reasoning" || part.type === "source" || part.type === "tool-call" || part.type === "tool-result" || part.type === "tool-call-streaming-start" || part.type === "tool-call-delta") {
+ if (part.type === "text" || part.type === "reasoning" || part.type === "source" || part.type === "tool-call" || part.type === "tool-result" || part.type === "tool-call-streaming-start" || part.type === "tool-call-delta") {
  await (onChunk == null ? void 0 : onChunk({ chunk: part }));
  }
  if (part.type === "error") {
  await (onError == null ? void 0 : onError({ error: part.error }));
  }
- if (part.type === "text-delta") {
- recordedStepText += part.textDelta;
- recordedContinuationText += part.textDelta;
- recordedFullText += part.textDelta;
+ if (part.type === "text") {
+ recordedStepText += part.text;
+ recordedContinuationText += part.text;
+ recordedFullText += part.text;
  }
  if (part.type === "reasoning") {
- if (activeReasoningText == null) {
- activeReasoningText = { type: "text", text: part.textDelta };
- stepReasoning.push(activeReasoningText);
- } else {
- activeReasoningText.text += part.textDelta;
- }
- }
- if (part.type === "reasoning-signature") {
- if (activeReasoningText == null) {
- throw new import_provider22.AISDKError({
- name: "InvalidStreamPart",
- message: "reasoning-signature without reasoning"
- });
+ if (part.reasoningType === "text") {
+ if (activeReasoningText == null) {
+ activeReasoningText = { type: "text", text: part.text };
+ stepReasoning.push(activeReasoningText);
+ } else {
+ activeReasoningText.text += part.text;
+ }
+ } else if (part.reasoningType === "signature") {
+ if (activeReasoningText == null) {
+ throw new import_provider23.AISDKError({
+ name: "InvalidStreamPart",
+ message: "reasoning-signature without reasoning"
+ });
+ }
+ activeReasoningText.signature = part.signature;
+ activeReasoningText = void 0;
+ } else if (part.reasoningType === "redacted") {
+ stepReasoning.push({ type: "redacted", data: part.data });
  }
- activeReasoningText.signature = part.signature;
- activeReasoningText = void 0;
- }
- if (part.type === "redacted-reasoning") {
- stepReasoning.push({ type: "redacted", data: part.data });
  }
  if (part.type === "file") {
- stepFiles.push(part);
+ stepFiles.push(part.file);
  }
  if (part.type === "source") {
- recordedSources.push(part.source);
- recordedStepSources.push(part.source);
+ recordedSources.push(part);
+ recordedStepSources.push(part);
  }
  if (part.type === "tool-call") {
  recordedToolCalls.push(part);
@@ -6887,8 +6967,7 @@ var DefaultStreamTextResult = class {
  ...part.response,
  messages: [...recordedResponse.messages, ...stepMessages]
  },
- providerMetadata: part.experimental_providerMetadata,
- experimental_providerMetadata: part.experimental_providerMetadata,
+ providerMetadata: part.providerMetadata,
  isContinued: part.isContinued
  };
  await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
@@ -6929,9 +7008,7 @@ var DefaultStreamTextResult = class {
  self.responsePromise.resolve(lastStep.response);
  self.toolCallsPromise.resolve(lastStep.toolCalls);
  self.toolResultsPromise.resolve(lastStep.toolResults);
- self.providerMetadataPromise.resolve(
- lastStep.experimental_providerMetadata
- );
+ self.providerMetadataPromise.resolve(lastStep.providerMetadata);
  self.reasoningPromise.resolve(lastStep.reasoning);
  self.reasoningDetailsPromise.resolve(lastStep.reasoningDetails);
  const finishReason = recordedFinishReason != null ? recordedFinishReason : "unknown";
@@ -6961,7 +7038,6 @@ var DefaultStreamTextResult = class {
  response: lastStep.response,
  warnings: lastStep.warnings,
  providerMetadata: lastStep.providerMetadata,
- experimental_providerMetadata: lastStep.experimental_providerMetadata,
  steps: recordedSteps
  }));
  rootSpan.setAttributes(
@@ -7105,7 +7181,7 @@ var DefaultStreamTextResult = class {
  "gen_ai.system": model.provider,
  "gen_ai.request.model": model.modelId,
  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
- "gen_ai.request.max_tokens": settings.maxTokens,
+ "gen_ai.request.max_tokens": settings.maxOutputTokens,
  "gen_ai.request.presence_penalty": settings.presencePenalty,
  "gen_ai.request.stop_sequences": settings.stopSequences,
  "gen_ai.request.temperature": settings.temperature,
@@ -7174,10 +7250,10 @@ var DefaultStreamTextResult = class {
  chunk
  }) {
  controller.enqueue(chunk);
- stepText += chunk.textDelta;
- fullStepText += chunk.textDelta;
+ stepText += chunk.text;
+ fullStepText += chunk.text;
  chunkTextPublished = true;
- hasWhitespaceSuffix = chunk.textDelta.trimEnd() !== chunk.textDelta;
+ hasWhitespaceSuffix = chunk.text.trimEnd() !== chunk.text;
  }
  self.addStream(
  transformedStream.pipeThrough(
@@ -7200,14 +7276,14 @@ var DefaultStreamTextResult = class {
  warnings: warnings != null ? warnings : []
  });
  }
- if (chunk.type === "text-delta" && chunk.textDelta.length === 0) {
+ if (chunk.type === "text" && chunk.text.length === 0) {
  return;
  }
  const chunkType = chunk.type;
  switch (chunkType) {
- case "text-delta": {
+ case "text": {
  if (continueSteps) {
- const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.textDelta.trimStart() : chunk.textDelta;
+ const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.text.trimStart() : chunk.text;
  if (trimmedChunkText.length === 0) {
  break;
  }
@@ -7219,8 +7295,8 @@ var DefaultStreamTextResult = class {
  await publishTextChunk({
  controller,
  chunk: {
- type: "text-delta",
- textDelta: split.prefix + split.whitespace
+ type: "text",
+ text: split.prefix + split.whitespace
  }
  });
  }
@@ -7231,35 +7307,31 @@ var DefaultStreamTextResult = class {
  }
  case "reasoning": {
  controller.enqueue(chunk);
- if (activeReasoningText2 == null) {
- activeReasoningText2 = {
- type: "text",
- text: chunk.textDelta
- };
- stepReasoning2.push(activeReasoningText2);
- } else {
- activeReasoningText2.text += chunk.textDelta;
- }
- break;
- }
- case "reasoning-signature": {
- controller.enqueue(chunk);
- if (activeReasoningText2 == null) {
- throw new InvalidStreamPartError({
- chunk,
- message: "reasoning-signature without reasoning"
+ if (chunk.reasoningType === "text") {
+ if (activeReasoningText2 == null) {
+ activeReasoningText2 = {
+ type: "text",
+ text: chunk.text
+ };
+ stepReasoning2.push(activeReasoningText2);
+ } else {
+ activeReasoningText2.text += chunk.text;
+ }
+ } else if (chunk.reasoningType === "signature") {
+ if (activeReasoningText2 == null) {
+ throw new InvalidStreamPartError({
+ chunk,
+ message: "reasoning-signature without reasoning"
+ });
+ }
+ activeReasoningText2.signature = chunk.signature;
+ activeReasoningText2 = void 0;
+ } else if (chunk.reasoningType === "redacted") {
+ stepReasoning2.push({
+ type: "redacted",
+ data: chunk.data
  });
  }
- activeReasoningText2.signature = chunk.signature;
- activeReasoningText2 = void 0;
- break;
- }
- case "redacted-reasoning": {
- controller.enqueue(chunk);
- stepReasoning2.push({
- type: "redacted",
- data: chunk.data
- });
  break;
  }
  case "tool-call": {
@@ -7283,7 +7355,7 @@ var DefaultStreamTextResult = class {
  case "finish": {
  stepUsage = chunk.usage;
  stepFinishReason = chunk.finishReason;
- stepProviderMetadata = chunk.experimental_providerMetadata;
+ stepProviderMetadata = chunk.providerMetadata;
  stepLogProbs = chunk.logprobs;
  const msToFinish = now2() - startTimestampMs;
  doStreamSpan.addEvent("ai.stream.finish");
@@ -7294,7 +7366,7 @@ var DefaultStreamTextResult = class {
  break;
  }
  case "file": {
- stepFiles2.push(chunk);
+ stepFiles2.push(chunk.file);
  controller.enqueue(chunk);
  break;
  }
@@ -7335,10 +7407,7 @@ var DefaultStreamTextResult = class {
  stepType2 === "continue" && !chunkTextPublished)) {
  await publishTextChunk({
  controller,
- chunk: {
- type: "text-delta",
- textDelta: chunkBuffer
- }
+ chunk: { type: "text", text: chunkBuffer }
  });
  chunkBuffer = "";
  }
@@ -7375,7 +7444,6 @@ var DefaultStreamTextResult = class {
  finishReason: stepFinishReason,
  usage: stepUsage,
  providerMetadata: stepProviderMetadata,
- experimental_providerMetadata: stepProviderMetadata,
  logprobs: stepLogProbs,
  request: stepRequest,
  response: {
@@ -7393,7 +7461,6 @@ var DefaultStreamTextResult = class {
  finishReason: stepFinishReason,
  usage: combinedUsage,
  providerMetadata: stepProviderMetadata,
- experimental_providerMetadata: stepProviderMetadata,
  logprobs: stepLogProbs,
  response: {
  ...stepResponse,
@@ -7479,9 +7546,6 @@ var DefaultStreamTextResult = class {
  get finishReason() {
  return this.finishReasonPromise.value;
  }
- get experimental_providerMetadata() {
- return this.providerMetadataPromise.value;
- }
  get providerMetadata() {
  return this.providerMetadataPromise.value;
  }
@@ -7533,8 +7597,8 @@ var DefaultStreamTextResult = class {
  this.teeStream().pipeThrough(
  new TransformStream({
  transform({ part }, controller) {
- if (part.type === "text-delta") {
- controller.enqueue(part.textDelta);
+ if (part.type === "text") {
+ controller.enqueue(part.text);
  }
  }
  })
@@ -7592,52 +7656,45 @@ var DefaultStreamTextResult = class {
  transform: async (chunk, controller) => {
  const chunkType = chunk.type;
  switch (chunkType) {
- case "text-delta": {
- controller.enqueue(formatDataStreamPart("text", chunk.textDelta));
+ case "text": {
+ controller.enqueue(formatDataStreamPart("text", chunk.text));
  break;
  }
  case "reasoning": {
  if (sendReasoning) {
- controller.enqueue(
- formatDataStreamPart("reasoning", chunk.textDelta)
- );
- }
- break;
- }
- case "redacted-reasoning": {
- if (sendReasoning) {
- controller.enqueue(
- formatDataStreamPart("redacted_reasoning", {
- data: chunk.data
- })
- );
- }
- break;
- }
- case "reasoning-signature": {
- if (sendReasoning) {
- controller.enqueue(
- formatDataStreamPart("reasoning_signature", {
- signature: chunk.signature
- })
- );
+ if (chunk.reasoningType === "text") {
+ controller.enqueue(
+ formatDataStreamPart("reasoning", chunk.text)
+ );
+ } else if (chunk.reasoningType === "signature") {
+ controller.enqueue(
+ formatDataStreamPart("reasoning_signature", {
+ signature: chunk.signature
+ })
+ );
+ } else if (chunk.reasoningType === "redacted") {
+ controller.enqueue(
+ formatDataStreamPart("redacted_reasoning", {
+ data: chunk.data
+ })
+ );
+ }
  }
  break;
  }
  case "file": {
  controller.enqueue(
+ // TODO update protocol to v2 or replace with event stream
  formatDataStreamPart("file", {
- mimeType: chunk.mediaType,
- data: chunk.base64
+ mimeType: chunk.file.mediaType,
+ data: chunk.file.base64
  })
  );
  break;
  }
  case "source": {
  if (sendSources) {
- controller.enqueue(
- formatDataStreamPart("source", chunk.source)
- );
+ controller.enqueue(formatDataStreamPart("source", chunk));
  }
  break;
  }
@@ -7832,8 +7889,8 @@ var DefaultStreamTextResult = class {
  };

  // errors/no-transcript-generated-error.ts
- var import_provider23 = require("@ai-sdk/provider");
- var NoTranscriptGeneratedError = class extends import_provider23.AISDKError {
+ var import_provider24 = require("@ai-sdk/provider");
+ var NoTranscriptGeneratedError = class extends import_provider24.AISDKError {
  constructor(options) {
  super({
  name: "AI_NoTranscriptGeneratedError",
@@ -7982,15 +8039,15 @@ function extractReasoningMiddleware({
  wrapGenerate: async ({ doGenerate }) => {
  const { text: rawText, ...rest } = await doGenerate();
  if (rawText == null) {
- return { text: rawText, ...rest };
+ return { text: void 0, ...rest };
  }
- const text2 = startWithReasoning ? openingTag + rawText : rawText;
+ const text2 = startWithReasoning ? openingTag + rawText.text : rawText.text;
  const regexp = new RegExp(`${openingTag}(.*?)${closingTag}`, "gs");
  const matches = Array.from(text2.matchAll(regexp));
  if (!matches.length) {
- return { text: text2, ...rest };
+ return { text: { type: "text", text: text2 }, ...rest };
  }
- const reasoning = matches.map((match) => match[1]).join(separator);
+ const reasoningText = matches.map((match) => match[1]).join(separator);
  let textWithoutReasoning = text2;
  for (let i = matches.length - 1; i >= 0; i--) {
  const match = matches[i];
@@ -8000,7 +8057,17 @@ function extractReasoningMiddleware({
  );
  textWithoutReasoning = beforeMatch + (beforeMatch.length > 0 && afterMatch.length > 0 ? separator : "") + afterMatch;
  }
- return { ...rest, text: textWithoutReasoning, reasoning };
+ return {
+ ...rest,
+ text: { type: "text", text: textWithoutReasoning },
+ reasoning: reasoningText.length > 0 ? [
+ {
+ type: "reasoning",
+ reasoningType: "text",
+ text: reasoningText
+ }
+ ] : void 0
+ };
  },
  wrapStream: async ({ doStream }) => {
  const { stream, ...rest } = await doStream();
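
Per the wrapGenerate hunk above, extractReasoningMiddleware now returns text as a { type: "text", text } part and reasoning as an array of reasoning parts rather than bare strings. An illustrative before/after, assuming tag options that match <think>...</think> (the tag name is not part of this diff):

// raw generation: "<think>plan</think>answer"
// before: { text: "answer", reasoning: "plan", ... }
// after:  { text: { type: "text", text: "answer" },
//           reasoning: [{ type: "reasoning", reasoningType: "text", text: "plan" }], ... }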
@@ -8013,18 +8080,24 @@ function extractReasoningMiddleware({
  stream: stream.pipeThrough(
  new TransformStream({
  transform: (chunk, controller) => {
- if (chunk.type !== "text-delta") {
+ if (chunk.type !== "text") {
  controller.enqueue(chunk);
  return;
  }
- buffer += chunk.textDelta;
+ buffer += chunk.text;
  function publish(text2) {
  if (text2.length > 0) {
  const prefix = afterSwitch && (isReasoning ? !isFirstReasoning : !isFirstText) ? separator : "";
- controller.enqueue({
- type: isReasoning ? "reasoning" : "text-delta",
- textDelta: prefix + text2
- });
+ controller.enqueue(
+ isReasoning ? {
+ type: "reasoning",
+ reasoningType: "text",
+ text: prefix + text2
+ } : {
+ type: "text",
+ text: prefix + text2
+ }
+ );
  afterSwitch = false;
  if (isReasoning) {
  isFirstReasoning = false;
@@ -8071,43 +8144,12 @@ function simulateStreamingMiddleware() {
  start(controller) {
  controller.enqueue({ type: "response-metadata", ...result.response });
  if (result.reasoning) {
- if (typeof result.reasoning === "string") {
- controller.enqueue({
- type: "reasoning",
- textDelta: result.reasoning
- });
- } else {
- for (const reasoning of result.reasoning) {
- switch (reasoning.type) {
- case "text": {
- controller.enqueue({
- type: "reasoning",
- textDelta: reasoning.text
- });
- if (reasoning.signature != null) {
- controller.enqueue({
- type: "reasoning-signature",
- signature: reasoning.signature
- });
- }
- break;
- }
- case "redacted": {
- controller.enqueue({
- type: "redacted-reasoning",
- data: reasoning.data
- });
- break;
- }
- }
- }
+ for (const reasoningPart of result.reasoning) {
+ controller.enqueue(reasoningPart);
  }
  }
  if (result.text) {
- controller.enqueue({
- type: "text-delta",
- textDelta: result.text
- });
+ controller.enqueue(result.text);
  }
  if (result.toolCalls) {
  for (const toolCall of result.toolCalls) {
@@ -8118,10 +8160,7 @@ function simulateStreamingMiddleware() {
  toolName: toolCall.toolName,
  argsTextDelta: toolCall.args
  });
- controller.enqueue({
- type: "tool-call",
- ...toolCall
- });
+ controller.enqueue(toolCall);
  }
  }
  controller.enqueue({
@@ -8209,7 +8248,7 @@ function appendClientMessage({
  }

  // core/prompt/append-response-messages.ts
- var import_provider24 = require("@ai-sdk/provider");
+ var import_provider25 = require("@ai-sdk/provider");
  function appendResponseMessages({
  messages,
  responseMessages,
@@ -8292,7 +8331,7 @@ function appendResponseMessages({
  break;
  case "file":
  if (part.data instanceof URL) {
- throw new import_provider24.AISDKError({
+ throw new import_provider25.AISDKError({
  name: "InvalidAssistantFileData",
  message: "File data cannot be a URL"
  });
@@ -8386,7 +8425,7 @@ function appendResponseMessages({
  }

  // core/registry/custom-provider.ts
- var import_provider25 = require("@ai-sdk/provider");
+ var import_provider26 = require("@ai-sdk/provider");
  function customProvider({
  languageModels,
  textEmbeddingModels,
@@ -8401,7 +8440,7 @@ function customProvider({
  if (fallbackProvider) {
  return fallbackProvider.languageModel(modelId);
  }
- throw new import_provider25.NoSuchModelError({ modelId, modelType: "languageModel" });
+ throw new import_provider26.NoSuchModelError({ modelId, modelType: "languageModel" });
  },
  textEmbeddingModel(modelId) {
  if (textEmbeddingModels != null && modelId in textEmbeddingModels) {
@@ -8410,7 +8449,7 @@ function customProvider({
  if (fallbackProvider) {
  return fallbackProvider.textEmbeddingModel(modelId);
  }
- throw new import_provider25.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
+ throw new import_provider26.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
  },
  imageModel(modelId) {
  if (imageModels != null && modelId in imageModels) {
@@ -8419,19 +8458,19 @@ function customProvider({
  if (fallbackProvider == null ? void 0 : fallbackProvider.imageModel) {
  return fallbackProvider.imageModel(modelId);
  }
- throw new import_provider25.NoSuchModelError({ modelId, modelType: "imageModel" });
+ throw new import_provider26.NoSuchModelError({ modelId, modelType: "imageModel" });
  }
  };
  }
  var experimental_customProvider = customProvider;

  // core/registry/no-such-provider-error.ts
- var import_provider26 = require("@ai-sdk/provider");
+ var import_provider27 = require("@ai-sdk/provider");
  var name16 = "AI_NoSuchProviderError";
  var marker16 = `vercel.ai.error.${name16}`;
  var symbol16 = Symbol.for(marker16);
  var _a16;
- var NoSuchProviderError = class extends import_provider26.NoSuchModelError {
+ var NoSuchProviderError = class extends import_provider27.NoSuchModelError {
  constructor({
  modelId,
  modelType,
@@ -8445,13 +8484,13 @@ var NoSuchProviderError = class extends import_provider26.NoSuchModelError {
  this.availableProviders = availableProviders;
  }
  static isInstance(error) {
- return import_provider26.AISDKError.hasMarker(error, marker16);
+ return import_provider27.AISDKError.hasMarker(error, marker16);
  }
  };
  _a16 = symbol16;

  // core/registry/provider-registry.ts
- var import_provider27 = require("@ai-sdk/provider");
+ var import_provider28 = require("@ai-sdk/provider");
  function createProviderRegistry(providers, {
  separator = ":"
  } = {}) {
@@ -8490,20 +8529,20 @@ var DefaultProviderRegistry = class {
  splitId(id, modelType) {
  const index = id.indexOf(this.separator);
  if (index === -1) {
- throw new import_provider27.NoSuchModelError({
+ throw new import_provider28.NoSuchModelError({
  modelId: id,
  modelType,
  message: `Invalid ${modelType} id for registry: ${id} (must be in the format "providerId${this.separator}modelId")`
  });
  }
- return [id.slice(0, index), id.slice(index + 1)];
+ return [id.slice(0, index), id.slice(index + this.separator.length)];
  }
  languageModel(id) {
  var _a17, _b;
  const [providerId, modelId] = this.splitId(id, "languageModel");
  const model = (_b = (_a17 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a17, modelId);
  if (model == null) {
- throw new import_provider27.NoSuchModelError({ modelId: id, modelType: "languageModel" });
+ throw new import_provider28.NoSuchModelError({ modelId: id, modelType: "languageModel" });
  }
  return model;
  }
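
The splitId change above fixes a real bug: the old id.slice(index + 1) only skipped one character, so any registry configured with a multi-character separator produced mangled model ids. A small sketch (the provider and separator values are illustrative):

const registry = createProviderRegistry({ anthropic }, { separator: " > " });
// id "anthropic > claude-3-haiku":
//   before: modelId became "> claude-3-haiku" (only 1 of 3 separator chars skipped)
//   after:  modelId is "claude-3-haiku"
const model = registry.languageModel("anthropic > claude-3-haiku");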
@@ -8513,7 +8552,7 @@ var DefaultProviderRegistry = class {
  const provider = this.getProvider(providerId);
  const model = (_a17 = provider.textEmbeddingModel) == null ? void 0 : _a17.call(provider, modelId);
  if (model == null) {
- throw new import_provider27.NoSuchModelError({
+ throw new import_provider28.NoSuchModelError({
  modelId: id,
  modelType: "textEmbeddingModel"
  });
@@ -8526,7 +8565,7 @@ var DefaultProviderRegistry = class {
  const provider = this.getProvider(providerId);
  const model = (_a17 = provider.imageModel) == null ? void 0 : _a17.call(provider, modelId);
  if (model == null) {
- throw new import_provider27.NoSuchModelError({ modelId: id, modelType: "imageModel" });
+ throw new import_provider28.NoSuchModelError({ modelId: id, modelType: "imageModel" });
  }
  return model;
  }
@@ -8538,7 +8577,7 @@ function tool(tool2) {
  }

  // core/tool/mcp/mcp-sse-transport.ts
- var import_provider_utils19 = require("@ai-sdk/provider-utils");
+ var import_provider_utils18 = require("@ai-sdk/provider-utils");

  // core/tool/mcp/json-rpc-message.ts
  var import_zod9 = require("zod");
@@ -8709,7 +8748,7 @@ var SseMCPTransport = class {
  (_b = this.onerror) == null ? void 0 : _b.call(this, error);
  return reject(error);
  }
- const stream = response.body.pipeThrough(new TextDecoderStream()).pipeThrough((0, import_provider_utils19.createEventSourceParserStream)());
+ const stream = response.body.pipeThrough(new TextDecoderStream()).pipeThrough((0, import_provider_utils18.createEventSourceParserStream)());
  const reader = stream.getReader();
  const processEvents = async () => {
  var _a18, _b2, _c2;
@@ -9016,6 +9055,7 @@ var MCPClient = class {
  async tools({
  schemas = "automatic"
  } = {}) {
+ var _a17;
  const tools = {};
  try {
  const listToolsResult = await this.listTools();
@@ -9023,14 +9063,18 @@ var MCPClient = class {
  if (schemas !== "automatic" && !(name17 in schemas)) {
  continue;
  }
- const parameters = schemas === "automatic" ? jsonSchema(inputSchema) : schemas[name17].parameters;
+ const parameters = schemas === "automatic" ? jsonSchema({
+ ...inputSchema,
+ properties: (_a17 = inputSchema.properties) != null ? _a17 : {},
+ additionalProperties: false
+ }) : schemas[name17].parameters;
  const self = this;
  const toolWithExecute = tool({
  description,
  parameters,
  execute: async (args, options) => {
- var _a17;
- (_a17 = options == null ? void 0 : options.abortSignal) == null ? void 0 : _a17.throwIfAborted();
+ var _a18;
+ (_a18 = options == null ? void 0 : options.abortSignal) == null ? void 0 : _a18.throwIfAborted();
  return self.callTool({
  name: name17,
  args,
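
For schemas: "automatic", the MCP tool's input schema is now normalized before being wrapped: properties defaults to {} and additionalProperties is pinned to false. An illustrative normalization (the input schema is invented, and the rationale is presumably to satisfy providers that validate tool schemas strictly):

// reported by the MCP server: { type: "object" }
// passed to jsonSchema(...):  { type: "object", properties: {}, additionalProperties: false }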
@@ -9083,7 +9127,7 @@ var MCPClient = class {
  };

  // core/util/cosine-similarity.ts
- function cosineSimilarity(vector1, vector2, options) {
+ function cosineSimilarity(vector1, vector2) {
  if (vector1.length !== vector2.length) {
  throw new InvalidArgumentError({
  parameter: "vector1,vector2",
@@ -9093,13 +9137,6 @@ function cosineSimilarity(vector1, vector2, options) {
  }
  const n = vector1.length;
  if (n === 0) {
- if (options == null ? void 0 : options.throwErrorForEmptyVectors) {
- throw new InvalidArgumentError({
- parameter: "vector1",
- value: vector1,
- message: "Vectors cannot be empty"
- });
- }
  return 0;
  }
  let magnitudeSquared1 = 0;
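
cosineSimilarity loses its options bag above: the throwErrorForEmptyVectors escape hatch is gone, and empty vectors now always yield 0. Callers that relied on the throwing behavior need their own guard; a sketch (the error message is illustrative):

if (a.length === 0 || b.length === 0) {
  throw new Error("vectors must be non-empty"); // caller-side check replacing throwErrorForEmptyVectors
}
const similarity = cosineSimilarity(a, b);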
@@ -9116,7 +9153,7 @@ function cosineSimilarity(vector1, vector2, options) {
  }

  // core/util/simulate-readable-stream.ts
- var import_provider_utils20 = require("@ai-sdk/provider-utils");
+ var import_provider_utils19 = require("@ai-sdk/provider-utils");
  function simulateReadableStream({
  chunks,
  initialDelayInMs = 0,
@@ -9124,7 +9161,7 @@ function simulateReadableStream({
  _internal
  }) {
  var _a17;
- const delay2 = (_a17 = _internal == null ? void 0 : _internal.delay) != null ? _a17 : import_provider_utils20.delay;
+ const delay2 = (_a17 = _internal == null ? void 0 : _internal.delay) != null ? _a17 : import_provider_utils19.delay;
  let index = 0;
  return new ReadableStream({
  async pull(controller) {
@@ -9251,10 +9288,10 @@ __export(llamaindex_adapter_exports, {
  toDataStream: () => toDataStream2,
  toDataStreamResponse: () => toDataStreamResponse2
  });
- var import_provider_utils22 = require("@ai-sdk/provider-utils");
+ var import_provider_utils21 = require("@ai-sdk/provider-utils");
  function toDataStreamInternal2(stream, callbacks) {
  const trimStart = trimStartOfStream();
- return (0, import_provider_utils22.convertAsyncIteratorToReadableStream)(stream[Symbol.asyncIterator]()).pipeThrough(
+ return (0, import_provider_utils21.convertAsyncIteratorToReadableStream)(stream[Symbol.asyncIterator]()).pipeThrough(
  new TransformStream({
  async transform(message, controller) {
  controller.enqueue(trimStart(message.delta));