ai 5.0.41 → 5.0.43

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,20 @@
  # ai
 
+ ## 5.0.43
+
+ ### Patch Changes
+
+ - 0294b58: feat(ai): set `ai`, `@ai-sdk/provider-utils`, and runtime in `user-agent` header
+ - Updated dependencies [0294b58]
+   - @ai-sdk/provider-utils@3.0.9
+   - @ai-sdk/gateway@1.0.22
+
+ ## 5.0.42
+
+ ### Patch Changes
+
+ - de5c066: fix(ai): forwarded providerExecuted flag in validateUIMessages
+
  ## 5.0.41
 
  ### Patch Changes
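
The 5.0.43 entry means calls made through `ai` now advertise the SDK in the `user-agent` request header; the bundle changes below append an `ai/5.0.43` suffix via `withUserAgentSuffix` from `@ai-sdk/provider-utils` (the provider-utils and runtime parts mentioned in the changelog are added at the provider-utils level). A minimal sketch of the merge behavior those call sites appear to rely on; the helper name comes from the diff, but the append logic shown here is an assumption, not the library source:

```js
// Sketch only: approximates what withUserAgentSuffix(headers ?? {}, `ai/${VERSION}`)
// appears to do at the call sites in this diff. Assumed behavior, not library code.
function withUserAgentSuffixSketch(headers, suffix) {
  const existing = headers["user-agent"];
  return {
    ...headers,
    // keep any caller-supplied user-agent value and append the SDK marker after it
    "user-agent": existing ? `${existing} ${suffix}` : suffix,
  };
}

console.log(withUserAgentSuffixSketch({ authorization: "Bearer <key>" }, "ai/5.0.43"));
// -> { authorization: 'Bearer <key>', 'user-agent': 'ai/5.0.43' }
```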
package/dist/index.js CHANGED
@@ -58,7 +58,7 @@ __export(src_exports, {
  UI_MESSAGE_STREAM_HEADERS: () => UI_MESSAGE_STREAM_HEADERS,
  UnsupportedFunctionalityError: () => import_provider17.UnsupportedFunctionalityError,
  UnsupportedModelVersionError: () => UnsupportedModelVersionError,
- asSchema: () => import_provider_utils29.asSchema,
+ asSchema: () => import_provider_utils34.asSchema,
  assistantModelMessageSchema: () => assistantModelMessageSchema,
  callCompletionApi: () => callCompletionApi,
  consumeStream: () => consumeStream,
@@ -72,14 +72,14 @@ __export(src_exports, {
  coreUserMessageSchema: () => coreUserMessageSchema,
  cosineSimilarity: () => cosineSimilarity,
  createGateway: () => import_gateway3.createGateway,
- createIdGenerator: () => import_provider_utils29.createIdGenerator,
+ createIdGenerator: () => import_provider_utils34.createIdGenerator,
  createProviderRegistry: () => createProviderRegistry,
  createTextStreamResponse: () => createTextStreamResponse,
  createUIMessageStream: () => createUIMessageStream,
  createUIMessageStreamResponse: () => createUIMessageStreamResponse,
  customProvider: () => customProvider,
  defaultSettingsMiddleware: () => defaultSettingsMiddleware,
- dynamicTool: () => import_provider_utils29.dynamicTool,
+ dynamicTool: () => import_provider_utils34.dynamicTool,
  embed: () => embed,
  embedMany: () => embedMany,
  experimental_createMCPClient: () => createMCPClient,
@@ -90,7 +90,7 @@ __export(src_exports, {
  experimental_transcribe: () => transcribe,
  extractReasoningMiddleware: () => extractReasoningMiddleware,
  gateway: () => import_gateway3.gateway,
- generateId: () => import_provider_utils29.generateId,
+ generateId: () => import_provider_utils34.generateId,
  generateObject: () => generateObject,
  generateText: () => generateText,
  getTextFromDataUrl: () => getTextFromDataUrl,
@@ -100,7 +100,7 @@ __export(src_exports, {
  isDeepEqualData: () => isDeepEqualData,
  isToolOrDynamicToolUIPart: () => isToolOrDynamicToolUIPart,
  isToolUIPart: () => isToolUIPart,
- jsonSchema: () => import_provider_utils29.jsonSchema,
+ jsonSchema: () => import_provider_utils34.jsonSchema,
  lastAssistantMessageIsCompleteWithToolCalls: () => lastAssistantMessageIsCompleteWithToolCalls,
  modelMessageSchema: () => modelMessageSchema,
  parsePartialJson: () => parsePartialJson,
@@ -114,17 +114,17 @@ __export(src_exports, {
  streamObject: () => streamObject,
  streamText: () => streamText,
  systemModelMessageSchema: () => systemModelMessageSchema,
- tool: () => import_provider_utils29.tool,
+ tool: () => import_provider_utils34.tool,
  toolModelMessageSchema: () => toolModelMessageSchema,
  userModelMessageSchema: () => userModelMessageSchema,
  validateUIMessages: () => validateUIMessages,
  wrapLanguageModel: () => wrapLanguageModel,
  wrapProvider: () => wrapProvider,
- zodSchema: () => import_provider_utils29.zodSchema
+ zodSchema: () => import_provider_utils34.zodSchema
  });
  module.exports = __toCommonJS(src_exports);
  var import_gateway3 = require("@ai-sdk/gateway");
- var import_provider_utils29 = require("@ai-sdk/provider-utils");
+ var import_provider_utils34 = require("@ai-sdk/provider-utils");
 
  // src/generate-text/generate-text.ts
  var import_provider_utils9 = require("@ai-sdk/provider-utils");
@@ -2158,6 +2158,9 @@ function toResponseMessages({
  return responseMessages;
  }
 
+ // src/version.ts
+ var VERSION = true ? "5.0.43" : "0.0.0-test";
+
  // src/generate-text/generate-text.ts
  var originalGenerateId = (0, import_provider_utils9.createIdGenerator)({
  prefix: "aitxt",
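
The `true ? "5.0.43" : "0.0.0-test"` ternary looks like a build-time switch whose test branch is unreachable in the published bundle; its only consumer in this diff is the `ai/${VERSION}` user-agent suffix. A quick check of the resulting string:

```js
// In the published bundle the condition is the literal `true`,
// so VERSION always resolves to the release version.
var VERSION = true ? "5.0.43" : "0.0.0-test";
console.log(`ai/${VERSION}`); // "ai/5.0.43"
```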
@@ -2198,10 +2201,14 @@ async function generateText({
  abortSignal
  });
  const callSettings = prepareCallSettings(settings);
+ const headersWithUserAgent = (0, import_provider_utils9.withUserAgentSuffix)(
+ headers != null ? headers : {},
+ `ai/${VERSION}`
+ );
  const baseTelemetryAttributes = getBaseTelemetryAttributes({
  model,
  telemetry,
- headers,
+ headers: headersWithUserAgent,
  settings: { ...callSettings, maxRetries }
  });
  const initialPrompt = await standardizePrompt({
@@ -2316,7 +2323,7 @@ async function generateText({
  prompt: promptMessages,
  providerOptions,
  abortSignal,
- headers
+ headers: headersWithUserAgent
  });
  const responseData = {
  id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
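
Taken together, the two generateText hunks mean caller-supplied headers still flow through to both the telemetry attributes and the provider call; the SDK only appends its marker. A hedged usage sketch (the `@ai-sdk/openai` provider and model id are assumptions for illustration, not part of this diff):

```js
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai"; // assumed provider, for illustration only

async function main() {
  const { text } = await generateText({
    model: openai("gpt-4o-mini"),
    prompt: "Say hello.",
    // Custom headers are preserved; per the changelog, the outgoing request's
    // user-agent additionally carries ai/5.0.43 plus provider-utils and runtime info.
    headers: { "x-request-source": "docs-example" },
  });
  console.log(text);
}

main().catch(console.error);
```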
@@ -5962,6 +5969,7 @@ var Agent = class {
  };
 
  // src/embed/embed.ts
+ var import_provider_utils14 = require("@ai-sdk/provider-utils");
  async function embed({
  model: modelArg,
  value,
@@ -5976,10 +5984,14 @@ async function embed({
  maxRetries: maxRetriesArg,
  abortSignal
  });
+ const headersWithUserAgent = (0, import_provider_utils14.withUserAgentSuffix)(
+ headers != null ? headers : {},
+ `ai/${VERSION}`
+ );
  const baseTelemetryAttributes = getBaseTelemetryAttributes({
  model,
  telemetry,
- headers,
+ headers: headersWithUserAgent,
  settings: { maxRetries }
  });
  const tracer = getTracer(telemetry);
@@ -6018,7 +6030,7 @@ async function embed({
  const modelResponse = await model.doEmbed({
  values: [value],
  abortSignal,
- headers,
+ headers: headersWithUserAgent,
  providerOptions
  });
  const embedding2 = modelResponse.embeddings[0];
@@ -6075,6 +6087,9 @@ var DefaultEmbedResult = class {
  }
  };
 
+ // src/embed/embed-many.ts
+ var import_provider_utils15 = require("@ai-sdk/provider-utils");
+
  // src/util/split-array.ts
  function splitArray(array, chunkSize) {
  if (chunkSize <= 0) {
@@ -6103,10 +6118,14 @@ async function embedMany({
  maxRetries: maxRetriesArg,
  abortSignal
  });
+ const headersWithUserAgent = (0, import_provider_utils15.withUserAgentSuffix)(
+ headers != null ? headers : {},
+ `ai/${VERSION}`
+ );
  const baseTelemetryAttributes = getBaseTelemetryAttributes({
  model,
  telemetry,
- headers,
+ headers: headersWithUserAgent,
  settings: { maxRetries }
  });
  const tracer = getTracer(telemetry);
@@ -6155,7 +6174,7 @@ async function embedMany({
  const modelResponse = await model.doEmbed({
  values,
  abortSignal,
- headers,
+ headers: headersWithUserAgent,
  providerOptions
  });
  const embeddings3 = modelResponse.embeddings;
@@ -6237,7 +6256,7 @@ async function embedMany({
  const modelResponse = await model.doEmbed({
  values: chunk,
  abortSignal,
- headers,
+ headers: headersWithUserAgent,
  providerOptions
  });
  const embeddings2 = modelResponse.embeddings;
@@ -6318,6 +6337,7 @@ var DefaultEmbedManyResult = class {
  };
 
  // src/generate-image/generate-image.ts
+ var import_provider_utils16 = require("@ai-sdk/provider-utils");
  async function generateImage({
  model,
  prompt,
@@ -6339,6 +6359,10 @@ async function generateImage({
  modelId: model.modelId
  });
  }
+ const headersWithUserAgent = (0, import_provider_utils16.withUserAgentSuffix)(
+ headers != null ? headers : {},
+ `ai/${VERSION}`
+ );
  const { retry } = prepareRetries({
  maxRetries: maxRetriesArg,
  abortSignal
@@ -6359,7 +6383,7 @@ async function generateImage({
  prompt,
  n: callImageCount,
  abortSignal,
- headers,
+ headers: headersWithUserAgent,
  size,
  aspectRatio,
  seed,
@@ -6431,7 +6455,7 @@ async function invokeModelMaxImagesPerCall(model) {
  }
 
  // src/generate-object/generate-object.ts
- var import_provider_utils16 = require("@ai-sdk/provider-utils");
+ var import_provider_utils19 = require("@ai-sdk/provider-utils");
 
  // src/generate-text/extract-reasoning-content.ts
  function extractReasoningContent(content) {
@@ -6443,7 +6467,7 @@ function extractReasoningContent(content) {
 
  // src/generate-object/output-strategy.ts
  var import_provider24 = require("@ai-sdk/provider");
- var import_provider_utils14 = require("@ai-sdk/provider-utils");
+ var import_provider_utils17 = require("@ai-sdk/provider-utils");
  var noSchemaOutputStrategy = {
  type: "no-schema",
  jsonSchema: void 0,
@@ -6482,7 +6506,7 @@ var objectOutputStrategy = (schema) => ({
  };
  },
  async validateFinalResult(value) {
- return (0, import_provider_utils14.safeValidateTypes)({ value, schema });
+ return (0, import_provider_utils17.safeValidateTypes)({ value, schema });
  },
  createElementStream() {
  throw new import_provider24.UnsupportedFunctionalityError({
@@ -6526,7 +6550,7 @@ var arrayOutputStrategy = (schema) => {
  const resultArray = [];
  for (let i = 0; i < inputArray.length; i++) {
  const element = inputArray[i];
- const result = await (0, import_provider_utils14.safeValidateTypes)({ value: element, schema });
+ const result = await (0, import_provider_utils17.safeValidateTypes)({ value: element, schema });
  if (i === inputArray.length - 1 && !isFinalDelta) {
  continue;
  }
@@ -6567,7 +6591,7 @@ var arrayOutputStrategy = (schema) => {
  }
  const inputArray = value.elements;
  for (const element of inputArray) {
- const result = await (0, import_provider_utils14.safeValidateTypes)({ value: element, schema });
+ const result = await (0, import_provider_utils17.safeValidateTypes)({ value: element, schema });
  if (!result.success) {
  return result;
  }
@@ -6685,9 +6709,9 @@ function getOutputStrategy({
  }) {
  switch (output) {
  case "object":
- return objectOutputStrategy((0, import_provider_utils14.asSchema)(schema));
+ return objectOutputStrategy((0, import_provider_utils17.asSchema)(schema));
  case "array":
- return arrayOutputStrategy((0, import_provider_utils14.asSchema)(schema));
+ return arrayOutputStrategy((0, import_provider_utils17.asSchema)(schema));
  case "enum":
  return enumOutputStrategy(enumValues);
  case "no-schema":
@@ -6701,9 +6725,9 @@ function getOutputStrategy({
 
  // src/generate-object/parse-and-validate-object-result.ts
  var import_provider25 = require("@ai-sdk/provider");
- var import_provider_utils15 = require("@ai-sdk/provider-utils");
+ var import_provider_utils18 = require("@ai-sdk/provider-utils");
  async function parseAndValidateObjectResult(result, outputStrategy, context) {
- const parseResult = await (0, import_provider_utils15.safeParseJSON)({ text: result });
+ const parseResult = await (0, import_provider_utils18.safeParseJSON)({ text: result });
  if (!parseResult.success) {
  throw new NoObjectGeneratedError({
  message: "No object generated: could not parse the response.",
@@ -6875,7 +6899,7 @@ function validateObjectGenerationInput({
  }
 
  // src/generate-object/generate-object.ts
- var originalGenerateId3 = (0, import_provider_utils16.createIdGenerator)({ prefix: "aiobj", size: 24 });
+ var originalGenerateId3 = (0, import_provider_utils19.createIdGenerator)({ prefix: "aiobj", size: 24 });
  async function generateObject(options) {
  const {
  model: modelArg,
@@ -6920,10 +6944,14 @@ async function generateObject(options) {
  enumValues
  });
  const callSettings = prepareCallSettings(settings);
+ const headersWithUserAgent = (0, import_provider_utils19.withUserAgentSuffix)(
+ headers != null ? headers : {},
+ `ai/${VERSION}`
+ );
  const baseTelemetryAttributes = getBaseTelemetryAttributes({
  model,
  telemetry,
- headers,
+ headers: headersWithUserAgent,
  settings: { ...callSettings, maxRetries }
  });
  const tracer = getTracer(telemetry);
@@ -7008,7 +7036,7 @@ async function generateObject(options) {
  prompt: promptMessages,
  providerOptions,
  abortSignal,
- headers
+ headers: headersWithUserAgent
  });
  const responseData = {
  id: (_b = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b : generateId3(),
@@ -7135,7 +7163,7 @@ var DefaultGenerateObjectResult = class {
  };
 
  // src/generate-object/stream-object.ts
- var import_provider_utils18 = require("@ai-sdk/provider-utils");
+ var import_provider_utils21 = require("@ai-sdk/provider-utils");
 
  // src/util/cosine-similarity.ts
  function cosineSimilarity(vector1, vector2) {
@@ -7245,7 +7273,7 @@ var SerialJobExecutor = class {
  };
 
  // src/util/simulate-readable-stream.ts
- var import_provider_utils17 = require("@ai-sdk/provider-utils");
+ var import_provider_utils20 = require("@ai-sdk/provider-utils");
  function simulateReadableStream({
  chunks,
  initialDelayInMs = 0,
@@ -7253,7 +7281,7 @@ function simulateReadableStream({
  _internal
  }) {
  var _a17;
- const delay2 = (_a17 = _internal == null ? void 0 : _internal.delay) != null ? _a17 : import_provider_utils17.delay;
+ const delay2 = (_a17 = _internal == null ? void 0 : _internal.delay) != null ? _a17 : import_provider_utils20.delay;
  let index = 0;
  return new ReadableStream({
  async pull(controller) {
@@ -7268,7 +7296,7 @@ function simulateReadableStream({
  }
 
  // src/generate-object/stream-object.ts
- var originalGenerateId4 = (0, import_provider_utils18.createIdGenerator)({ prefix: "aiobj", size: 24 });
+ var originalGenerateId4 = (0, import_provider_utils21.createIdGenerator)({ prefix: "aiobj", size: 24 });
  function streamObject(options) {
  const {
  model,
@@ -7788,6 +7816,9 @@ var DefaultStreamObjectResult = class {
  }
  };
 
+ // src/generate-speech/generate-speech.ts
+ var import_provider_utils22 = require("@ai-sdk/provider-utils");
+
  // src/error/no-speech-generated-error.ts
  var import_provider26 = require("@ai-sdk/provider");
  var NoSpeechGeneratedError = class extends import_provider26.AISDKError {
@@ -7847,6 +7878,10 @@ async function generateSpeech({
  modelId: model.modelId
  });
  }
+ const headersWithUserAgent = (0, import_provider_utils22.withUserAgentSuffix)(
+ headers != null ? headers : {},
+ `ai/${VERSION}`
+ );
  const { retry } = prepareRetries({
  maxRetries: maxRetriesArg,
  abortSignal
@@ -7860,7 +7895,7 @@ async function generateSpeech({
  speed,
  language,
  abortSignal,
- headers,
+ headers: headersWithUserAgent,
  providerOptions
  })
  );
@@ -7897,7 +7932,7 @@ __export(output_exports, {
  object: () => object,
  text: () => text
  });
- var import_provider_utils19 = require("@ai-sdk/provider-utils");
+ var import_provider_utils23 = require("@ai-sdk/provider-utils");
  var text = () => ({
  type: "text",
  responseFormat: { type: "text" },
@@ -7911,7 +7946,7 @@ var text = () => ({
  var object = ({
  schema: inputSchema
  }) => {
- const schema = (0, import_provider_utils19.asSchema)(inputSchema);
+ const schema = (0, import_provider_utils23.asSchema)(inputSchema);
  return {
  type: "object",
  responseFormat: {
@@ -7937,7 +7972,7 @@ var object = ({
  }
  },
  async parseOutput({ text: text2 }, context) {
- const parseResult = await (0, import_provider_utils19.safeParseJSON)({ text: text2 });
+ const parseResult = await (0, import_provider_utils23.safeParseJSON)({ text: text2 });
  if (!parseResult.success) {
  throw new NoObjectGeneratedError({
  message: "No object generated: could not parse the response.",
@@ -7948,7 +7983,7 @@ var object = ({
  finishReason: context.finishReason
  });
  }
- const validationResult = await (0, import_provider_utils19.safeValidateTypes)({
+ const validationResult = await (0, import_provider_utils23.safeValidateTypes)({
  value: parseResult.value,
  schema
  });
@@ -7968,7 +8003,7 @@ var object = ({
  };
 
  // src/generate-text/smooth-stream.ts
- var import_provider_utils20 = require("@ai-sdk/provider-utils");
+ var import_provider_utils24 = require("@ai-sdk/provider-utils");
  var import_provider27 = require("@ai-sdk/provider");
  var CHUNKING_REGEXPS = {
  word: /\S+\s+/m,
@@ -7977,7 +8012,7 @@ var CHUNKING_REGEXPS = {
  function smoothStream({
  delayInMs = 10,
  chunking = "word",
- _internal: { delay: delay2 = import_provider_utils20.delay } = {}
+ _internal: { delay: delay2 = import_provider_utils24.delay } = {}
  } = {}) {
  let detectChunk;
  if (typeof chunking === "function") {
@@ -8576,10 +8611,10 @@ var DefaultProviderRegistry = class {
  };
 
  // src/tool/mcp/mcp-client.ts
- var import_provider_utils22 = require("@ai-sdk/provider-utils");
+ var import_provider_utils26 = require("@ai-sdk/provider-utils");
 
  // src/tool/mcp/mcp-sse-transport.ts
- var import_provider_utils21 = require("@ai-sdk/provider-utils");
+ var import_provider_utils25 = require("@ai-sdk/provider-utils");
 
  // src/tool/mcp/json-rpc-message.ts
  var import_v49 = require("zod/v4");
@@ -8751,7 +8786,7 @@ var SseMCPTransport = class {
  (_b = this.onerror) == null ? void 0 : _b.call(this, error);
  return reject(error);
  }
- const stream = response.body.pipeThrough(new TextDecoderStream()).pipeThrough(new import_provider_utils21.EventSourceParserStream());
+ const stream = response.body.pipeThrough(new TextDecoderStream()).pipeThrough(new import_provider_utils25.EventSourceParserStream());
  const reader = stream.getReader();
  const processEvents = async () => {
  var _a18, _b2, _c2;
@@ -9081,15 +9116,15 @@ var DefaultMCPClient = class {
  (_a18 = options == null ? void 0 : options.abortSignal) == null ? void 0 : _a18.throwIfAborted();
  return self.callTool({ name: name17, args, options });
  };
- const toolWithExecute = schemas === "automatic" ? (0, import_provider_utils22.dynamicTool)({
+ const toolWithExecute = schemas === "automatic" ? (0, import_provider_utils26.dynamicTool)({
  description,
- inputSchema: (0, import_provider_utils22.jsonSchema)({
+ inputSchema: (0, import_provider_utils26.jsonSchema)({
  ...inputSchema,
  properties: (_a17 = inputSchema.properties) != null ? _a17 : {},
  additionalProperties: false
  }),
  execute
- }) : (0, import_provider_utils22.tool)({
+ }) : (0, import_provider_utils26.tool)({
  description,
  inputSchema: schemas[name17].inputSchema,
  execute
@@ -9138,6 +9173,9 @@ var DefaultMCPClient = class {
  }
  };
 
+ // src/transcribe/transcribe.ts
+ var import_provider_utils27 = require("@ai-sdk/provider-utils");
+
  // src/error/no-transcript-generated-error.ts
  var import_provider31 = require("@ai-sdk/provider");
  var NoTranscriptGeneratedError = class extends import_provider31.AISDKError {
@@ -9170,6 +9208,10 @@ async function transcribe({
  maxRetries: maxRetriesArg,
  abortSignal
  });
+ const headersWithUserAgent = (0, import_provider_utils27.withUserAgentSuffix)(
+ headers != null ? headers : {},
+ `ai/${VERSION}`
+ );
  const audioData = audio instanceof URL ? (await download({ url: audio })).data : convertDataContentToUint8Array(audio);
  const result = await retry(
  () => {
@@ -9177,7 +9219,7 @@ async function transcribe({
  return model.doGenerate({
  audio: audioData,
  abortSignal,
- headers,
+ headers: headersWithUserAgent,
  providerOptions,
  mediaType: (_a17 = detectMediaType({
  data: audioData,
@@ -9214,7 +9256,7 @@ var DefaultTranscriptionResult = class {
  };
 
  // src/ui/call-completion-api.ts
- var import_provider_utils23 = require("@ai-sdk/provider-utils");
+ var import_provider_utils28 = require("@ai-sdk/provider-utils");
 
  // src/ui/process-text-stream.ts
  async function processTextStream({
@@ -9292,7 +9334,7 @@ async function callCompletionApi({
  }
  case "data": {
  await consumeStream({
- stream: (0, import_provider_utils23.parseJsonEventStream)({
+ stream: (0, import_provider_utils28.parseJsonEventStream)({
  stream: response.body,
  schema: uiMessageChunkSchema
  }).pipeThrough(
@@ -9344,7 +9386,7 @@ async function callCompletionApi({
  }
 
  // src/ui/chat.ts
- var import_provider_utils26 = require("@ai-sdk/provider-utils");
+ var import_provider_utils31 = require("@ai-sdk/provider-utils");
 
  // src/ui/convert-file-list-to-file-ui-parts.ts
  async function convertFileListToFileUIParts(files) {
@@ -9377,10 +9419,10 @@ async function convertFileListToFileUIParts(files) {
  }
 
  // src/ui/default-chat-transport.ts
- var import_provider_utils25 = require("@ai-sdk/provider-utils");
+ var import_provider_utils30 = require("@ai-sdk/provider-utils");
 
  // src/ui/http-chat-transport.ts
- var import_provider_utils24 = require("@ai-sdk/provider-utils");
+ var import_provider_utils29 = require("@ai-sdk/provider-utils");
  var HttpChatTransport = class {
  constructor({
  api = "/api/chat",
@@ -9404,9 +9446,9 @@ var HttpChatTransport = class {
  ...options
  }) {
  var _a17, _b, _c, _d, _e;
- const resolvedBody = await (0, import_provider_utils24.resolve)(this.body);
- const resolvedHeaders = await (0, import_provider_utils24.resolve)(this.headers);
- const resolvedCredentials = await (0, import_provider_utils24.resolve)(this.credentials);
+ const resolvedBody = await (0, import_provider_utils29.resolve)(this.body);
+ const resolvedHeaders = await (0, import_provider_utils29.resolve)(this.headers);
+ const resolvedCredentials = await (0, import_provider_utils29.resolve)(this.credentials);
  const preparedRequest = await ((_a17 = this.prepareSendMessagesRequest) == null ? void 0 : _a17.call(this, {
  api: this.api,
  id: options.chatId,
@@ -9452,9 +9494,9 @@ var HttpChatTransport = class {
  }
  async reconnectToStream(options) {
  var _a17, _b, _c, _d, _e;
- const resolvedBody = await (0, import_provider_utils24.resolve)(this.body);
- const resolvedHeaders = await (0, import_provider_utils24.resolve)(this.headers);
- const resolvedCredentials = await (0, import_provider_utils24.resolve)(this.credentials);
+ const resolvedBody = await (0, import_provider_utils29.resolve)(this.body);
+ const resolvedHeaders = await (0, import_provider_utils29.resolve)(this.headers);
+ const resolvedCredentials = await (0, import_provider_utils29.resolve)(this.credentials);
  const preparedRequest = await ((_a17 = this.prepareReconnectToStreamRequest) == null ? void 0 : _a17.call(this, {
  api: this.api,
  id: options.chatId,
@@ -9493,7 +9535,7 @@ var DefaultChatTransport = class extends HttpChatTransport {
  super(options);
  }
  processResponseStream(stream) {
- return (0, import_provider_utils25.parseJsonEventStream)({
+ return (0, import_provider_utils30.parseJsonEventStream)({
  stream,
  schema: uiMessageChunkSchema
  }).pipeThrough(
@@ -9512,7 +9554,7 @@ var DefaultChatTransport = class extends HttpChatTransport {
  // src/ui/chat.ts
  var AbstractChat = class {
  constructor({
- generateId: generateId3 = import_provider_utils26.generateId,
+ generateId: generateId3 = import_provider_utils31.generateId,
  id = generateId3(),
  transport = new DefaultChatTransport(),
  messageMetadataSchema,
@@ -9894,7 +9936,7 @@ var TextStreamChatTransport = class extends HttpChatTransport {
 
  // src/ui/validate-ui-messages.ts
  var import_provider32 = require("@ai-sdk/provider");
- var import_provider_utils27 = require("@ai-sdk/provider-utils");
+ var import_provider_utils32 = require("@ai-sdk/provider-utils");
  var import_v410 = require("zod/v4");
  var textUIPartSchema = import_v410.z.object({
  type: import_v410.z.literal("text"),
@@ -9985,6 +10027,7 @@ var toolUIPartSchemas = [
  type: import_v410.z.string().startsWith("tool-"),
  toolCallId: import_v410.z.string(),
  state: import_v410.z.literal("input-streaming"),
+ providerExecuted: import_v410.z.boolean().optional(),
  input: import_v410.z.unknown().optional(),
  output: import_v410.z.never().optional(),
  errorText: import_v410.z.never().optional()
@@ -9993,6 +10036,7 @@ var toolUIPartSchemas = [
  type: import_v410.z.string().startsWith("tool-"),
  toolCallId: import_v410.z.string(),
  state: import_v410.z.literal("input-available"),
+ providerExecuted: import_v410.z.boolean().optional(),
  input: import_v410.z.unknown(),
  output: import_v410.z.never().optional(),
  errorText: import_v410.z.never().optional(),
@@ -10002,6 +10046,7 @@ var toolUIPartSchemas = [
  type: import_v410.z.string().startsWith("tool-"),
  toolCallId: import_v410.z.string(),
  state: import_v410.z.literal("output-available"),
+ providerExecuted: import_v410.z.boolean().optional(),
  input: import_v410.z.unknown(),
  output: import_v410.z.unknown(),
  errorText: import_v410.z.never().optional(),
@@ -10012,6 +10057,7 @@ var toolUIPartSchemas = [
  type: import_v410.z.string().startsWith("tool-"),
  toolCallId: import_v410.z.string(),
  state: import_v410.z.literal("output-error"),
+ providerExecuted: import_v410.z.boolean().optional(),
  input: import_v410.z.unknown(),
  output: import_v410.z.never().optional(),
  errorText: import_v410.z.string(),
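
These four schema hunks are the visible part of the 5.0.42 fix: tool UI parts now carry an optional `providerExecuted` boolean in every state, so the flag is forwarded through `validateUIMessages` instead of being dropped during validation. A minimal sketch (the message and the `tool-getWeather` part are made-up examples that follow the schemas above):

```js
import { validateUIMessages } from "ai";

// Hypothetical assistant message containing a provider-executed tool call
// in its final "output-available" state.
const messages = [
  {
    id: "msg-1",
    role: "assistant",
    parts: [
      {
        type: "tool-getWeather", // any "tool-*" part type
        toolCallId: "call-1",
        state: "output-available",
        providerExecuted: true, // accepted and preserved as of this release
        input: { city: "Berlin" },
        output: { temperatureCelsius: 21 },
      },
    ],
  },
];

const validated = await validateUIMessages({ messages });
console.log(validated[0].parts[0].providerExecuted); // true
```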
@@ -10049,13 +10095,13 @@ async function validateUIMessages({
  message: "messages parameter must be provided"
  });
  }
- const validatedMessages = await (0, import_provider_utils27.validateTypes)({
+ const validatedMessages = await (0, import_provider_utils32.validateTypes)({
  value: messages,
  schema: import_v410.z.array(uiMessageSchema)
  });
  if (metadataSchema) {
  for (const message of validatedMessages) {
- await (0, import_provider_utils27.validateTypes)({
+ await (0, import_provider_utils32.validateTypes)({
  value: message.metadata,
  schema: metadataSchema
  });
@@ -10075,7 +10121,7 @@ async function validateUIMessages({
  cause: `No data schema found for data part ${dataName}`
  });
  }
- await (0, import_provider_utils27.validateTypes)({
+ await (0, import_provider_utils32.validateTypes)({
  value: dataPart.data,
  schema: dataSchema
  });
@@ -10097,13 +10143,13 @@ async function validateUIMessages({
  });
  }
  if (toolPart.state === "input-available" || toolPart.state === "output-available" || toolPart.state === "output-error") {
- await (0, import_provider_utils27.validateTypes)({
+ await (0, import_provider_utils32.validateTypes)({
  value: toolPart.input,
  schema: tool3.inputSchema
  });
  }
  if (toolPart.state === "output-available" && tool3.outputSchema) {
- await (0, import_provider_utils27.validateTypes)({
+ await (0, import_provider_utils32.validateTypes)({
  value: toolPart.output,
  schema: tool3.outputSchema
  });
@@ -10115,13 +10161,13 @@
  }
 
  // src/ui-message-stream/create-ui-message-stream.ts
- var import_provider_utils28 = require("@ai-sdk/provider-utils");
+ var import_provider_utils33 = require("@ai-sdk/provider-utils");
  function createUIMessageStream({
  execute,
- onError = import_provider_utils28.getErrorMessage,
+ onError = import_provider_utils33.getErrorMessage,
  originalMessages,
  onFinish,
- generateId: generateId3 = import_provider_utils28.generateId
+ generateId: generateId3 = import_provider_utils33.generateId
  }) {
  let controller;
  const ongoingStreamPromises = [];