ai 4.3.8 → 4.3.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -862,6 +862,7 @@ var DefaultGeneratedFileWithType = class extends DefaultGeneratedFile {
 };

 // core/util/detect-mimetype.ts
+import { convertBase64ToUint8Array as convertBase64ToUint8Array2 } from "@ai-sdk/provider-utils";
 var imageMimeTypeSignatures = [
   {
     mimeType: "image/gif",
@@ -967,12 +968,26 @@ var audioMimeTypeSignatures = [
     base64Prefix: "ZnR5cA"
   }
 ];
+var stripID3 = (data) => {
+  const bytes = typeof data === "string" ? convertBase64ToUint8Array2(data) : data;
+  const id3Size = (bytes[6] & 127) << 21 | (bytes[7] & 127) << 14 | (bytes[8] & 127) << 7 | bytes[9] & 127;
+  return bytes.slice(id3Size + 10);
+};
+function stripID3TagsIfPresent(data) {
+  const hasId3 = typeof data === "string" && data.startsWith("SUQz") || typeof data !== "string" && data.length > 10 && data[0] === 73 && // 'I'
+  data[1] === 68 && // 'D'
+  data[2] === 51;
+  return hasId3 ? stripID3(data) : data;
+}
 function detectMimeType({
   data,
   signatures
 }) {
+  const processedData = stripID3TagsIfPresent(data);
   for (const signature of signatures) {
-    if (typeof data === "string" ? data.startsWith(signature.base64Prefix) : data.length >= signature.bytesPrefix.length && signature.bytesPrefix.every((byte, index) => data[index] === byte)) {
+    if (typeof processedData === "string" ? processedData.startsWith(signature.base64Prefix) : processedData.length >= signature.bytesPrefix.length && signature.bytesPrefix.every(
+      (byte, index) => processedData[index] === byte
+    )) {
       return signature.mimeType;
     }
   }
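Context for this hunk: MP3 files often begin with an ID3v2 tag ("ID3", which is "SUQz" in base64), and those leading tag bytes previously hid the audio signature from detectMimeType. The tag length in header bytes 6-9 is a synchsafe integer (7 bits per byte, hence the & 127 masks), and the fixed header is 10 bytes, hence slice(id3Size + 10). A minimal sketch of the effect, using a hypothetical tagged buffer:

// Hypothetical MP3 data: 10-byte ID3v2 header with an empty tag body,
// followed by MPEG frame-sync bytes for the signature check to match on.
const tagged = new Uint8Array([
  0x49, 0x44, 0x33,       // "ID3"
  0x03, 0x00,             // version 2.3.0
  0x00,                   // flags
  0x00, 0x00, 0x00, 0x00, // synchsafe tag size: 0
  0xff, 0xfb              // MPEG audio frame sync
]);
// Before 4.3.10, the leading "ID3" bytes masked every signature prefix.
// Now stripID3TagsIfPresent(tagged) returns Uint8Array [0xff, 0xfb],
// which detectMimeType can match against audioMimeTypeSignatures.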
@@ -1144,7 +1159,7 @@ async function download({ url }) {
 
 // core/prompt/data-content.ts
 import {
-  convertBase64ToUint8Array as convertBase64ToUint8Array2,
+  convertBase64ToUint8Array as convertBase64ToUint8Array3,
   convertUint8ArrayToBase64 as convertUint8ArrayToBase642
 } from "@ai-sdk/provider-utils";
 
@@ -1200,7 +1215,7 @@ function convertDataContentToUint8Array(content) {
   }
   if (typeof content === "string") {
     try {
-      return convertBase64ToUint8Array2(content);
+      return convertBase64ToUint8Array3(content);
     } catch (error) {
       throw new InvalidDataContentError({
         message: "Invalid data content. Content string is not a base64-encoded media.",
@@ -4091,6 +4106,7 @@ async function generateText({
   experimental_providerMetadata,
   providerOptions = experimental_providerMetadata,
   experimental_activeTools: activeTools,
+  experimental_prepareStep: prepareStep,
   experimental_repairToolCall: repairToolCall,
   _internal: {
     generateId: generateId3 = originalGenerateId3,
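This is the main new surface in this release: generateText accepts an experimental_prepareStep callback that can override the tool choice, active tools, or even the model for each step of a multi-step run. A hedged usage sketch based on the option names visible in this diff (the API is experimental; defaultModel and the "search" tool are placeholders):

import { generateText } from "ai";

const result = await generateText({
  model: defaultModel, // placeholder LanguageModel instance
  tools,
  maxSteps: 5,
  experimental_prepareStep: async ({ model, steps, maxSteps, stepNumber }) => {
    // Force a specific tool on the first step only.
    if (stepNumber === 0) {
      return { toolChoice: { type: "tool", toolName: "search" } };
    }
    // Returning undefined keeps model, toolChoice, and
    // experimental_activeTools at their outer-level defaults.
  },
  prompt: "...",
});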
@@ -4133,6 +4149,9 @@ async function generateText({
         telemetry
       }),
       ...baseTelemetryAttributes,
+      // model:
+      "ai.model.provider": model.provider,
+      "ai.model.id": model.modelId,
       // specific settings that only make sense on the outer level:
       "ai.prompt": {
         input: () => JSON.stringify({ system, prompt, messages })
@@ -4142,11 +4161,7 @@ async function generateText({
     }),
     tracer,
     fn: async (span) => {
-      var _a18, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
-      const mode = {
-        type: "regular",
-        ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
-      };
+      var _a18, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
       const callSettings = prepareCallSettings(settings);
       let currentModelResponse;
       let currentToolCalls = [];
@@ -4169,16 +4184,33 @@ async function generateText({
         ...initialPrompt.messages,
         ...responseMessages
       ];
+      const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
+        model,
+        steps,
+        maxSteps,
+        stepNumber: stepCount
+      }));
+      const stepToolChoice = (_a18 = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _a18 : toolChoice;
+      const stepActiveTools = (_b = prepareStepResult == null ? void 0 : prepareStepResult.experimental_activeTools) != null ? _b : activeTools;
+      const stepModel = (_c = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _c : model;
       const promptMessages = await convertToLanguageModelPrompt({
         prompt: {
           type: promptFormat,
           system: initialPrompt.system,
           messages: stepInputMessages
         },
-        modelSupportsImageUrls: model.supportsImageUrls,
-        modelSupportsUrl: (_a18 = model.supportsUrl) == null ? void 0 : _a18.bind(model)
+        modelSupportsImageUrls: stepModel.supportsImageUrls,
+        modelSupportsUrl: (_d = stepModel.supportsUrl) == null ? void 0 : _d.bind(stepModel)
         // support 'this' context
       });
+      const mode = {
+        type: "regular",
+        ...prepareToolsAndToolChoice({
+          tools,
+          toolChoice: stepToolChoice,
+          activeTools: stepActiveTools
+        })
+      };
       currentModelResponse = await retry(
         () => recordSpan({
           name: "ai.generateText.doGenerate",
@@ -4190,6 +4222,10 @@ async function generateText({
               telemetry
             }),
             ...baseTelemetryAttributes,
+            // model:
+            "ai.model.provider": stepModel.provider,
+            "ai.model.id": stepModel.modelId,
+            // prompt:
             "ai.prompt.format": { input: () => promptFormat },
             "ai.prompt.messages": {
               input: () => JSON.stringify(promptMessages)
@@ -4205,8 +4241,8 @@ async function generateText({
               input: () => mode.toolChoice != null ? JSON.stringify(mode.toolChoice) : void 0
             },
             // standardized gen-ai llm span attributes:
-            "gen_ai.system": model.provider,
-            "gen_ai.request.model": model.modelId,
+            "gen_ai.system": stepModel.provider,
+            "gen_ai.request.model": stepModel.modelId,
             "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
             "gen_ai.request.max_tokens": settings.maxTokens,
             "gen_ai.request.presence_penalty": settings.presencePenalty,
@@ -4219,7 +4255,7 @@ async function generateText({
           tracer,
           fn: async (span2) => {
             var _a19, _b2, _c2, _d2, _e2, _f2;
-            const result = await model.doGenerate({
+            const result = await stepModel.doGenerate({
               mode,
               ...callSettings,
               inputFormat: promptFormat,
@@ -4232,7 +4268,7 @@ async function generateText({
             const responseData = {
               id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
               timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-              modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : model.modelId
+              modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : stepModel.modelId
             };
             span2.setAttributes(
               selectTelemetryAttributes({
@@ -4264,7 +4300,7 @@ async function generateText({
         })
       );
       currentToolCalls = await Promise.all(
-        ((_b = currentModelResponse.toolCalls) != null ? _b : []).map(
+        ((_e = currentModelResponse.toolCalls) != null ? _e : []).map(
          (toolCall) => parseToolCall({
            toolCall,
            tools,
@@ -4299,7 +4335,7 @@ async function generateText({
           nextStepType = "tool-result";
         }
       }
-      const originalText = (_c = currentModelResponse.text) != null ? _c : "";
+      const originalText = (_f = currentModelResponse.text) != null ? _f : "";
       const stepTextLeadingWhitespaceTrimmed = stepType === "continue" && // only for continue steps
       text2.trimEnd() !== text2 ? originalText.trimStart() : originalText;
       const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace(stepTextLeadingWhitespaceTrimmed) : stepTextLeadingWhitespaceTrimmed;
@@ -4307,7 +4343,7 @@ async function generateText({
       currentReasoningDetails = asReasoningDetails(
         currentModelResponse.reasoning
       );
-      sources.push(...(_d = currentModelResponse.sources) != null ? _d : []);
+      sources.push(...(_g = currentModelResponse.sources) != null ? _g : []);
       if (stepType === "continue") {
         const lastMessage = responseMessages[responseMessages.length - 1];
         if (typeof lastMessage.content === "string") {
@@ -4339,18 +4375,18 @@ async function generateText({
         reasoning: asReasoningText(currentReasoningDetails),
         reasoningDetails: currentReasoningDetails,
         files: asFiles(currentModelResponse.files),
-        sources: (_e = currentModelResponse.sources) != null ? _e : [],
+        sources: (_h = currentModelResponse.sources) != null ? _h : [],
         toolCalls: currentToolCalls,
         toolResults: currentToolResults,
         finishReason: currentModelResponse.finishReason,
         usage: currentUsage,
         warnings: currentModelResponse.warnings,
         logprobs: currentModelResponse.logprobs,
-        request: (_f = currentModelResponse.request) != null ? _f : {},
+        request: (_i = currentModelResponse.request) != null ? _i : {},
         response: {
           ...currentModelResponse.response,
-          headers: (_g = currentModelResponse.rawResponse) == null ? void 0 : _g.headers,
-          body: (_h = currentModelResponse.rawResponse) == null ? void 0 : _h.body,
+          headers: (_j = currentModelResponse.rawResponse) == null ? void 0 : _j.headers,
+          body: (_k = currentModelResponse.rawResponse) == null ? void 0 : _k.body,
           // deep clone msgs to avoid mutating past messages in multi-step:
           messages: structuredClone(responseMessages)
         },
@@ -4402,11 +4438,11 @@ async function generateText({
       finishReason: currentModelResponse.finishReason,
       usage,
       warnings: currentModelResponse.warnings,
-      request: (_i = currentModelResponse.request) != null ? _i : {},
+      request: (_l = currentModelResponse.request) != null ? _l : {},
       response: {
         ...currentModelResponse.response,
-        headers: (_j = currentModelResponse.rawResponse) == null ? void 0 : _j.headers,
-        body: (_k = currentModelResponse.rawResponse) == null ? void 0 : _k.body,
+        headers: (_m = currentModelResponse.rawResponse) == null ? void 0 : _m.headers,
+        body: (_n = currentModelResponse.rawResponse) == null ? void 0 : _n.body,
         messages: responseMessages
       },
       logprobs: currentModelResponse.logprobs,