ai 5.0.42 → 5.0.43

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
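Every hunk below applies the same change: caller-supplied headers are passed through withUserAgentSuffix from @ai-sdk/provider-utils before they reach the telemetry attributes and the model's doGenerate/doEmbed calls, so outgoing requests carry an "ai/<version>" user-agent suffix. The following TypeScript sketch only mirrors the call shape visible in the diff; the sample headers are hypothetical, and the helper's exact header-merging behavior is assumed (it lives in @ai-sdk/provider-utils, not in this diff).

import { withUserAgentSuffix } from "@ai-sdk/provider-utils";

// Mirrors the constant the bundle now defines in src/version.ts.
const VERSION = "5.0.43";

// Hypothetical caller-supplied headers; in the real call sites they may be undefined.
const headers: Record<string, string> | undefined = {
  authorization: "Bearer <token>",
};

// Same call shape as every updated site: user headers (or {}) plus an ai/<version> suffix.
const headersWithUserAgent = withUserAgentSuffix(headers ?? {}, `ai/${VERSION}`);

// Assumption: the helper appends "ai/5.0.43" to any existing user-agent header,
// and the merged headers are what the provider request ultimately uses.
console.log(headersWithUserAgent);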
package/dist/index.mjs CHANGED
@@ -20,7 +20,8 @@ import {
 import {
   createIdGenerator,
   executeTool,
-  getErrorMessage as getErrorMessage5
+  getErrorMessage as getErrorMessage5,
+  withUserAgentSuffix
 } from "@ai-sdk/provider-utils";
 
 // src/error/no-output-specified-error.ts
@@ -2082,6 +2083,9 @@ function toResponseMessages({
   return responseMessages;
 }
 
+// src/version.ts
+var VERSION = true ? "5.0.43" : "0.0.0-test";
+
 // src/generate-text/generate-text.ts
 var originalGenerateId = createIdGenerator({
   prefix: "aitxt",
@@ -2122,10 +2126,14 @@ async function generateText({
     abortSignal
   });
   const callSettings = prepareCallSettings(settings);
+  const headersWithUserAgent = withUserAgentSuffix(
+    headers != null ? headers : {},
+    `ai/${VERSION}`
+  );
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
     model,
     telemetry,
-    headers,
+    headers: headersWithUserAgent,
     settings: { ...callSettings, maxRetries }
   });
   const initialPrompt = await standardizePrompt({
@@ -2240,7 +2248,7 @@ async function generateText({
       prompt: promptMessages,
       providerOptions,
       abortSignal,
-      headers
+      headers: headersWithUserAgent
     });
     const responseData = {
       id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
@@ -5897,6 +5905,7 @@ var Agent = class {
 };
 
 // src/embed/embed.ts
+import { withUserAgentSuffix as withUserAgentSuffix2 } from "@ai-sdk/provider-utils";
 async function embed({
   model: modelArg,
   value,
@@ -5911,10 +5920,14 @@ async function embed({
     maxRetries: maxRetriesArg,
     abortSignal
   });
+  const headersWithUserAgent = withUserAgentSuffix2(
+    headers != null ? headers : {},
+    `ai/${VERSION}`
+  );
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
     model,
     telemetry,
-    headers,
+    headers: headersWithUserAgent,
     settings: { maxRetries }
   });
   const tracer = getTracer(telemetry);
@@ -5953,7 +5966,7 @@ async function embed({
     const modelResponse = await model.doEmbed({
       values: [value],
       abortSignal,
-      headers,
+      headers: headersWithUserAgent,
       providerOptions
     });
     const embedding2 = modelResponse.embeddings[0];
@@ -6010,6 +6023,9 @@ var DefaultEmbedResult = class {
   }
 };
 
+// src/embed/embed-many.ts
+import { withUserAgentSuffix as withUserAgentSuffix3 } from "@ai-sdk/provider-utils";
+
 // src/util/split-array.ts
 function splitArray(array, chunkSize) {
   if (chunkSize <= 0) {
@@ -6038,10 +6054,14 @@ async function embedMany({
     maxRetries: maxRetriesArg,
     abortSignal
   });
+  const headersWithUserAgent = withUserAgentSuffix3(
+    headers != null ? headers : {},
+    `ai/${VERSION}`
+  );
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
     model,
     telemetry,
-    headers,
+    headers: headersWithUserAgent,
     settings: { maxRetries }
   });
   const tracer = getTracer(telemetry);
@@ -6090,7 +6110,7 @@ async function embedMany({
     const modelResponse = await model.doEmbed({
       values,
       abortSignal,
-      headers,
+      headers: headersWithUserAgent,
      providerOptions
     });
     const embeddings3 = modelResponse.embeddings;
@@ -6172,7 +6192,7 @@ async function embedMany({
     const modelResponse = await model.doEmbed({
       values: chunk,
       abortSignal,
-      headers,
+      headers: headersWithUserAgent,
      providerOptions
     });
     const embeddings2 = modelResponse.embeddings;
@@ -6253,6 +6273,7 @@ var DefaultEmbedManyResult = class {
 };
 
 // src/generate-image/generate-image.ts
+import { withUserAgentSuffix as withUserAgentSuffix4 } from "@ai-sdk/provider-utils";
 async function generateImage({
   model,
   prompt,
@@ -6274,6 +6295,10 @@ async function generateImage({
       modelId: model.modelId
     });
   }
+  const headersWithUserAgent = withUserAgentSuffix4(
+    headers != null ? headers : {},
+    `ai/${VERSION}`
+  );
   const { retry } = prepareRetries({
     maxRetries: maxRetriesArg,
     abortSignal
@@ -6294,7 +6319,7 @@ async function generateImage({
       prompt,
       n: callImageCount,
       abortSignal,
-      headers,
+      headers: headersWithUserAgent,
       size,
       aspectRatio,
       seed,
@@ -6367,7 +6392,8 @@ async function invokeModelMaxImagesPerCall(model) {
 
 // src/generate-object/generate-object.ts
 import {
-  createIdGenerator as createIdGenerator3
+  createIdGenerator as createIdGenerator3,
+  withUserAgentSuffix as withUserAgentSuffix5
 } from "@ai-sdk/provider-utils";
 
 // src/generate-text/extract-reasoning-content.ts
@@ -6865,10 +6891,14 @@ async function generateObject(options) {
     enumValues
   });
   const callSettings = prepareCallSettings(settings);
+  const headersWithUserAgent = withUserAgentSuffix5(
+    headers != null ? headers : {},
+    `ai/${VERSION}`
+  );
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
     model,
     telemetry,
-    headers,
+    headers: headersWithUserAgent,
     settings: { ...callSettings, maxRetries }
   });
   const tracer = getTracer(telemetry);
@@ -6953,7 +6983,7 @@ async function generateObject(options) {
       prompt: promptMessages,
      providerOptions,
       abortSignal,
-      headers
+      headers: headersWithUserAgent
     });
     const responseData = {
       id: (_b = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b : generateId3(),
@@ -7735,6 +7765,9 @@ var DefaultStreamObjectResult = class {
   }
 };
 
+// src/generate-speech/generate-speech.ts
+import { withUserAgentSuffix as withUserAgentSuffix6 } from "@ai-sdk/provider-utils";
+
 // src/error/no-speech-generated-error.ts
 import { AISDKError as AISDKError20 } from "@ai-sdk/provider";
 var NoSpeechGeneratedError = class extends AISDKError20 {
@@ -7794,6 +7827,10 @@ async function generateSpeech({
       modelId: model.modelId
     });
   }
+  const headersWithUserAgent = withUserAgentSuffix6(
+    headers != null ? headers : {},
+    `ai/${VERSION}`
+  );
   const { retry } = prepareRetries({
     maxRetries: maxRetriesArg,
     abortSignal
@@ -7807,7 +7844,7 @@ async function generateSpeech({
       speed,
       language,
       abortSignal,
-      headers,
+      headers: headersWithUserAgent,
       providerOptions
     })
   );
@@ -9097,6 +9134,9 @@ var DefaultMCPClient = class {
   }
 };
 
+// src/transcribe/transcribe.ts
+import { withUserAgentSuffix as withUserAgentSuffix7 } from "@ai-sdk/provider-utils";
+
 // src/error/no-transcript-generated-error.ts
 import { AISDKError as AISDKError22 } from "@ai-sdk/provider";
 var NoTranscriptGeneratedError = class extends AISDKError22 {
@@ -9129,6 +9169,10 @@ async function transcribe({
     maxRetries: maxRetriesArg,
     abortSignal
  });
+  const headersWithUserAgent = withUserAgentSuffix7(
+    headers != null ? headers : {},
+    `ai/${VERSION}`
+  );
   const audioData = audio instanceof URL ? (await download({ url: audio })).data : convertDataContentToUint8Array(audio);
   const result = await retry(
     () => {
@@ -9136,7 +9180,7 @@ async function transcribe({
       return model.doGenerate({
         audio: audioData,
         abortSignal,
-        headers,
+        headers: headersWithUserAgent,
         providerOptions,
         mediaType: (_a17 = detectMediaType({
           data: audioData,