ai 5.0.0-canary.10 → 5.0.0-canary.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -75,7 +75,7 @@ __export(ai_exports, {
   cosineSimilarity: () => cosineSimilarity,
   createDataStream: () => createDataStream,
   createDataStreamResponse: () => createDataStreamResponse,
-  createIdGenerator: () => import_provider_utils21.createIdGenerator,
+  createIdGenerator: () => import_provider_utils22.createIdGenerator,
   createProviderRegistry: () => createProviderRegistry,
   customProvider: () => customProvider,
   defaultSettingsMiddleware: () => defaultSettingsMiddleware,
@@ -91,7 +91,7 @@ __export(ai_exports, {
   extractReasoningMiddleware: () => extractReasoningMiddleware,
   fillMessageParts: () => fillMessageParts,
   formatDataStreamPart: () => formatDataStreamPart,
-  generateId: () => import_provider_utils21.generateId,
+  generateId: () => import_provider_utils22.generateId,
   generateObject: () => generateObject,
   generateText: () => generateText,
   getMessageParts: () => getMessageParts,
@@ -119,7 +119,7 @@ __export(ai_exports, {
 module.exports = __toCommonJS(ai_exports);

 // core/index.ts
-var import_provider_utils21 = require("@ai-sdk/provider-utils");
+var import_provider_utils22 = require("@ai-sdk/provider-utils");

 // core/util/index.ts
 var import_provider_utils5 = require("@ai-sdk/provider-utils");
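
Note on the import renames: the import_provider_utilsN bumps that recur throughout this diff (21→22, 12→13, and so on) are mechanical. esbuild numbers each require("@ai-sdk/provider-utils") binding in module order, and the new import added in core/prompt/convert-to-language-model-prompt.ts (see the @@ -2903 hunk below) shifts every later counter up by one.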
@@ -2679,7 +2679,7 @@ var DefaultGenerateImageResult = class {

 // core/generate-object/generate-object.ts
 var import_provider13 = require("@ai-sdk/provider");
-var import_provider_utils12 = require("@ai-sdk/provider-utils");
+var import_provider_utils13 = require("@ai-sdk/provider-utils");

 // errors/no-object-generated-error.ts
 var import_provider5 = require("@ai-sdk/provider");
@@ -2709,6 +2709,17 @@ var NoObjectGeneratedError = class extends import_provider5.AISDKError {
 };
 _a4 = symbol4;

+// core/generate-text/extract-content-text.ts
+function extractContentText(content) {
+  const parts = content.filter(
+    (content2) => content2.type === "text"
+  );
+  if (parts.length === 0) {
+    return void 0;
+  }
+  return parts.map((content2) => content2.text).join("");
+}
+
 // util/download-error.ts
 var import_provider6 = require("@ai-sdk/provider");
 var name5 = "AI_DownloadError";
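
Note: extractContentText has moved up in the bundle (it is deleted again further down, in the @@ -4208 hunk) because generate-object now uses it too. It collapses the v5 content-array response shape into a plain string. A minimal sketch of its behavior, using a hypothetical content array:

// hypothetical content array in the shape this bundle's doGenerate returns
const content = [
  { type: "text", text: "Hello, " },
  { type: "tool-call", toolCallId: "1", toolName: "search", args: "{}" },
  { type: "text", text: "world" }
];
extractContentText(content); // => "Hello, world"
extractContentText([]);      // => undefined (no text parts)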
@@ -2903,17 +2914,16 @@ var InvalidMessageRoleError = class extends import_provider9.AISDKError {
 _a7 = symbol7;

 // core/prompt/convert-to-language-model-prompt.ts
+var import_provider_utils10 = require("@ai-sdk/provider-utils");
 async function convertToLanguageModelPrompt({
   prompt,
-  modelSupportsImageUrls = true,
-  modelSupportsUrl = () => false,
+  supportedUrls,
   downloadImplementation = download
 }) {
   const downloadedAssets = await downloadAssets(
     prompt.messages,
     downloadImplementation,
-    modelSupportsImageUrls,
-    modelSupportsUrl
+    supportedUrls
   );
   return [
     ...prompt.system != null ? [{ role: "system", content: prompt.system }] : [],
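
Note: the pair of per-model flags (modelSupportsImageUrls, modelSupportsUrl) is replaced by a single supportedUrls value that callers obtain from the model (the call sites below use await model.getSupportedUrls()). A sketch of what a custom LanguageModelV2 implementation plausibly returns — inferred from the isUrlSupported call in the next hunk, since the authoritative shape lives in @ai-sdk/provider, not in this diff:

// hypothetical custom model, illustration only
const myModel = {
  // ...other LanguageModelV2 members...
  async getSupportedUrls() {
    // "URLs matching these patterns can be passed to the provider unmodified;
    // everything else gets downloaded first."
    return { "image/*": [/^https:\/\/my-cdn\.example\//] };
  }
};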
@@ -3032,19 +3042,29 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
     }
   }
 }
-async function downloadAssets(messages, downloadImplementation, modelSupportsImageUrls, modelSupportsUrl) {
+async function downloadAssets(messages, downloadImplementation, supportedUrls) {
   const urls = messages.filter((message) => message.role === "user").map((message) => message.content).filter(
     (content) => Array.isArray(content)
   ).flat().filter(
     (part) => part.type === "image" || part.type === "file"
-  ).filter(
-    (part) => !(part.type === "image" && modelSupportsImageUrls === true)
-  ).map((part) => part.type === "image" ? part.image : part.data).map(
-    (part) => (
-      // support string urls:
-      typeof part === "string" && (part.startsWith("http:") || part.startsWith("https:")) ? new URL(part) : part
-    )
-  ).filter((image) => image instanceof URL).filter((url) => !modelSupportsUrl(url));
+  ).map((part) => {
+    var _a17, _b;
+    const mediaType = (_b = (_a17 = part.mediaType) != null ? _a17 : part.mimeType) != null ? _b : part.type === "image" ? "image/*" : void 0;
+    let data = part.type === "image" ? part.image : part.data;
+    if (typeof data === "string") {
+      try {
+        data = new URL(data);
+      } catch (ignored) {
+      }
+    }
+    return { mediaType, data };
+  }).filter(
+    (part) => part.data instanceof URL && part.mediaType != null && !(0, import_provider_utils10.isUrlSupported)({
+      url: part.data.toString(),
+      mediaType: part.mediaType,
+      supportedUrls
+    })
+  ).map((part) => part.data);
   const downloadedImages = await Promise.all(
     urls.map(async (url) => ({
       url,
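
Note: the download decision is now per-part and media-type aware. Each image/file part is normalized to { mediaType, data } (image parts without an explicit type default to "image/*", and the deprecated mimeType field is still read as a fallback), and a part is downloaded only when its data is a URL that isUrlSupported rejects for the model's supportedUrls. A sketch of the effective behavior, under the assumed supportedUrls shape from the previous note:

// supportedUrls = { "image/*": [/^https:\/\/my-cdn\.example\//] }
// { type: "image", image: "https://my-cdn.example/cat.png" }
//   -> URL is natively supported, NOT downloaded
// { type: "image", image: "https://elsewhere.example/cat.png" }
//   -> no pattern matches, downloaded via downloadImplementation
// { type: "file", data: new Uint8Array([1, 2, 3]), mediaType: "application/pdf" }
//   -> not a URL, never downloaded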
@@ -3198,8 +3218,7 @@ function prepareCallSettings({
   }
   return {
     maxOutputTokens,
-    // TODO v5 remove default 0 for temperature
-    temperature: temperature != null ? temperature : 0,
+    temperature: temperature != null ? temperature : temperature === null ? void 0 : 0,
     topP,
     topK,
     presencePenalty,
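
Note: the replacement ternary gives temperature three-way semantics instead of two (a first step toward retiring the v4-era default of 0, per the deleted TODO):

// temperature: 0.7       -> 0.7        (explicit value passes through)
// temperature: null      -> undefined  (caller explicitly opts out of any default)
// temperature: undefined -> 0          (legacy default still applies)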
@@ -3211,7 +3230,7 @@

 // core/prompt/standardize-prompt.ts
 var import_provider11 = require("@ai-sdk/provider");
-var import_provider_utils10 = require("@ai-sdk/provider-utils");
+var import_provider_utils11 = require("@ai-sdk/provider-utils");
 var import_zod7 = require("zod");

 // core/prompt/attachments-to-parts.ts
@@ -3779,7 +3798,7 @@ function standardizePrompt({
       message: "messages must not be empty"
     });
   }
-  const validationResult = (0, import_provider_utils10.safeValidateTypes)({
+  const validationResult = (0, import_provider_utils11.safeValidateTypes)({
     value: messages,
     schema: import_zod7.z.array(coreMessageSchema)
   });
@@ -3818,29 +3837,9 @@ function addLanguageModelUsage(usage1, usage2) {
   };
 }

-// core/generate-object/inject-json-instruction.ts
-var DEFAULT_SCHEMA_PREFIX = "JSON schema:";
-var DEFAULT_SCHEMA_SUFFIX = "You MUST answer with a JSON object that matches the JSON schema above.";
-var DEFAULT_GENERIC_SUFFIX = "You MUST answer with JSON.";
-function injectJsonInstruction({
-  prompt,
-  schema,
-  schemaPrefix = schema != null ? DEFAULT_SCHEMA_PREFIX : void 0,
-  schemaSuffix = schema != null ? DEFAULT_SCHEMA_SUFFIX : DEFAULT_GENERIC_SUFFIX
-}) {
-  return [
-    prompt != null && prompt.length > 0 ? prompt : void 0,
-    prompt != null && prompt.length > 0 ? "" : void 0,
-    // add a newline if prompt is not null
-    schemaPrefix,
-    schema != null ? JSON.stringify(schema) : void 0,
-    schemaSuffix
-  ].filter((line) => line != null).join("\n");
-}
-
 // core/generate-object/output-strategy.ts
 var import_provider12 = require("@ai-sdk/provider");
-var import_provider_utils11 = require("@ai-sdk/provider-utils");
+var import_provider_utils12 = require("@ai-sdk/provider-utils");

 // core/util/async-iterable-stream.ts
 function createAsyncIterableStream(source) {
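
Note: injectJsonInstruction is deleted outright. It was the fallback for models without native structured-output support: given a system prompt and a schema, it produced a textual instruction of the form

// system prompt
//
// JSON schema:
// {"type":"object","properties":{...}}
// You MUST answer with a JSON object that matches the JSON schema above.

(or just "You MUST answer with JSON." when no schema was given). With the mode switch removed below, generateObject and streamObject always send the schema through responseFormat and leave any prompt augmentation to the provider.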
@@ -3896,7 +3895,7 @@ var objectOutputStrategy = (schema) => ({
     };
   },
   validateFinalResult(value) {
-    return (0, import_provider_utils11.safeValidateTypes)({ value, schema });
+    return (0, import_provider_utils12.safeValidateTypes)({ value, schema });
   },
   createElementStream() {
     throw new import_provider12.UnsupportedFunctionalityError({
@@ -3935,7 +3934,7 @@ var arrayOutputStrategy = (schema) => {
     const resultArray = [];
     for (let i = 0; i < inputArray.length; i++) {
       const element = inputArray[i];
-      const result = (0, import_provider_utils11.safeValidateTypes)({ value: element, schema });
+      const result = (0, import_provider_utils12.safeValidateTypes)({ value: element, schema });
       if (i === inputArray.length - 1 && !isFinalDelta) {
         continue;
       }
@@ -3976,7 +3975,7 @@ var arrayOutputStrategy = (schema) => {
     }
     const inputArray = value.elements;
     for (const element of inputArray) {
-      const result = (0, import_provider_utils11.safeValidateTypes)({ value: element, schema });
+      const result = (0, import_provider_utils12.safeValidateTypes)({ value: element, schema });
       if (!result.success) {
         return result;
       }
@@ -4085,7 +4084,6 @@ function getOutputStrategy({
 // core/generate-object/validate-object-generation-input.ts
 function validateObjectGenerationInput({
   output,
-  mode,
   schema,
   schemaName,
   schemaDescription,
@@ -4099,13 +4097,6 @@ function validateObjectGenerationInput({
     });
   }
   if (output === "no-schema") {
-    if (mode === "auto" || mode === "tool") {
-      throw new InvalidArgumentError({
-        parameter: "mode",
-        value: mode,
-        message: 'Mode must be "json" for no-schema output.'
-      });
-    }
     if (schema != null) {
       throw new InvalidArgumentError({
         parameter: "schema",
@@ -4208,19 +4199,8 @@ function validateObjectGenerationInput({
   }
 }

-// core/generate-text/extract-content-text.ts
-function extractContentText(content) {
-  const parts = content.filter(
-    (content2) => content2.type === "text"
-  );
-  if (parts.length === 0) {
-    return void 0;
-  }
-  return parts.map((content2) => content2.text).join("");
-}
-
 // core/generate-object/generate-object.ts
-var originalGenerateId = (0, import_provider_utils12.createIdGenerator)({ prefix: "aiobj", size: 24 });
+var originalGenerateId = (0, import_provider_utils13.createIdGenerator)({ prefix: "aiobj", size: 24 });
 async function generateObject({
   model,
   enum: enumValues,
@@ -4228,7 +4208,6 @@ async function generateObject({
   schema: inputSchema,
   schemaName,
   schemaDescription,
-  mode,
   output = "object",
   system,
   prompt,
@@ -4247,7 +4226,6 @@ async function generateObject({
 }) {
   validateObjectGenerationInput({
     output,
-    mode,
     schema: inputSchema,
     schemaName,
     schemaDescription,
@@ -4259,14 +4237,12 @@ async function generateObject({
     schema: inputSchema,
     enumValues
   });
-  if (outputStrategy.type === "no-schema" && mode === void 0) {
-    mode = "json";
-  }
+  const callSettings = prepareCallSettings(settings);
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
     model,
     telemetry,
     headers,
-    settings: { ...settings, maxRetries }
+    settings: { ...callSettings, maxRetries }
   });
   const tracer = getTracer(telemetry);
   return recordSpan({
@@ -4286,16 +4262,12 @@ async function generateObject({
         "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
         "ai.schema.name": schemaName,
         "ai.schema.description": schemaDescription,
-        "ai.settings.output": outputStrategy.type,
-        "ai.settings.mode": mode
+        "ai.settings.output": outputStrategy.type
       }
     }),
     tracer,
    fn: async (span) => {
-      var _a17, _b, _c, _d;
-      if (mode === "auto" || mode == null) {
-        mode = model.defaultObjectGenerationMode;
-      }
+      var _a17;
      let result;
      let finishReason;
      let usage;
@@ -4304,249 +4276,110 @@ async function generateObject({
      let request;
      let logprobs;
      let resultProviderMetadata;
-      switch (mode) {
-        case "json": {
-          const standardizedPrompt = standardizePrompt({
-            prompt: {
-              system: outputStrategy.jsonSchema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs ? system : injectJsonInstruction({
-                prompt: system,
-                schema: outputStrategy.jsonSchema
-              }),
-              prompt,
-              messages
-            },
-            tools: void 0
-          });
-          const promptMessages = await convertToLanguageModelPrompt({
-            prompt: standardizedPrompt,
-            modelSupportsImageUrls: model.supportsImageUrls,
-            modelSupportsUrl: (_a17 = model.supportsUrl) == null ? void 0 : _a17.bind(model)
-            // support 'this' context
-          });
-          const generateResult = await retry(
-            () => recordSpan({
-              name: "ai.generateObject.doGenerate",
-              attributes: selectTelemetryAttributes({
-                telemetry,
-                attributes: {
-                  ...assembleOperationName({
-                    operationId: "ai.generateObject.doGenerate",
-                    telemetry
-                  }),
-                  ...baseTelemetryAttributes,
-                  "ai.prompt.format": {
-                    input: () => standardizedPrompt.type
-                  },
-                  "ai.prompt.messages": {
-                    input: () => JSON.stringify(promptMessages)
-                  },
-                  "ai.settings.mode": mode,
-                  // standardized gen-ai llm span attributes:
-                  "gen_ai.system": model.provider,
-                  "gen_ai.request.model": model.modelId,
-                  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
-                  "gen_ai.request.max_tokens": settings.maxOutputTokens,
-                  "gen_ai.request.presence_penalty": settings.presencePenalty,
-                  "gen_ai.request.temperature": settings.temperature,
-                  "gen_ai.request.top_k": settings.topK,
-                  "gen_ai.request.top_p": settings.topP
-                }
+      const standardizedPrompt = standardizePrompt({
+        prompt: { system, prompt, messages },
+        tools: void 0
+      });
+      const promptMessages = await convertToLanguageModelPrompt({
+        prompt: standardizedPrompt,
+        supportedUrls: await model.getSupportedUrls()
+      });
+      const generateResult = await retry(
+        () => recordSpan({
+          name: "ai.generateObject.doGenerate",
+          attributes: selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              ...assembleOperationName({
+                operationId: "ai.generateObject.doGenerate",
+                telemetry
               }),
-              tracer,
-              fn: async (span2) => {
-                var _a18, _b2, _c2, _d2, _e, _f, _g, _h;
-                const result2 = await model.doGenerate({
-                  responseFormat: {
-                    type: "json",
-                    schema: outputStrategy.jsonSchema,
-                    name: schemaName,
-                    description: schemaDescription
-                  },
-                  ...prepareCallSettings(settings),
-                  inputFormat: standardizedPrompt.type,
-                  prompt: promptMessages,
-                  providerOptions,
-                  abortSignal,
-                  headers
-                });
-                const responseData = {
-                  id: (_b2 = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b2 : generateId3(),
-                  timestamp: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-                  modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
-                  headers: (_g = result2.response) == null ? void 0 : _g.headers,
-                  body: (_h = result2.response) == null ? void 0 : _h.body
-                };
-                const text2 = extractContentText(result2.content);
-                if (text2 === void 0) {
-                  throw new NoObjectGeneratedError({
-                    message: "No object generated: the model did not return a response.",
-                    response: responseData,
-                    usage: calculateLanguageModelUsage2(result2.usage),
-                    finishReason: result2.finishReason
-                  });
-                }
-                span2.setAttributes(
-                  selectTelemetryAttributes({
-                    telemetry,
-                    attributes: {
-                      "ai.response.finishReason": result2.finishReason,
-                      "ai.response.object": { output: () => text2 },
-                      "ai.response.id": responseData.id,
-                      "ai.response.model": responseData.modelId,
-                      "ai.response.timestamp": responseData.timestamp.toISOString(),
-                      // TODO rename telemetry attributes to inputTokens and outputTokens
-                      "ai.usage.promptTokens": result2.usage.inputTokens,
-                      "ai.usage.completionTokens": result2.usage.outputTokens,
-                      // standardized gen-ai llm span attributes:
-                      "gen_ai.response.finish_reasons": [result2.finishReason],
-                      "gen_ai.response.id": responseData.id,
-                      "gen_ai.response.model": responseData.modelId,
-                      "gen_ai.usage.input_tokens": result2.usage.inputTokens,
-                      "gen_ai.usage.output_tokens": result2.usage.outputTokens
-                    }
-                  })
-                );
-                return { ...result2, objectText: text2, responseData };
-              }
-            })
-          );
-          result = generateResult.objectText;
-          finishReason = generateResult.finishReason;
-          usage = generateResult.usage;
-          warnings = generateResult.warnings;
-          logprobs = generateResult.logprobs;
-          resultProviderMetadata = generateResult.providerMetadata;
-          request = (_b = generateResult.request) != null ? _b : {};
-          response = generateResult.responseData;
-          break;
-        }
-        case "tool": {
-          const standardizedPrompt = standardizePrompt({
-            prompt: { system, prompt, messages },
-            tools: void 0
-          });
-          const promptMessages = await convertToLanguageModelPrompt({
-            prompt: standardizedPrompt,
-            modelSupportsImageUrls: model.supportsImageUrls,
-            modelSupportsUrl: (_c = model.supportsUrl) == null ? void 0 : _c.bind(model)
-            // support 'this' context,
-          });
-          const inputFormat = standardizedPrompt.type;
-          const generateResult = await retry(
-            () => recordSpan({
-              name: "ai.generateObject.doGenerate",
-              attributes: selectTelemetryAttributes({
+              ...baseTelemetryAttributes,
+              "ai.prompt.format": {
+                input: () => standardizedPrompt.type
+              },
+              "ai.prompt.messages": {
+                input: () => JSON.stringify(promptMessages)
+              },
+              // standardized gen-ai llm span attributes:
+              "gen_ai.system": model.provider,
+              "gen_ai.request.model": model.modelId,
+              "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
+              "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
+              "gen_ai.request.presence_penalty": callSettings.presencePenalty,
+              "gen_ai.request.temperature": callSettings.temperature,
+              "gen_ai.request.top_k": callSettings.topK,
+              "gen_ai.request.top_p": callSettings.topP
+            }
+          }),
+          tracer,
+          fn: async (span2) => {
+            var _a18, _b, _c, _d, _e, _f, _g, _h;
+            const result2 = await model.doGenerate({
+              responseFormat: {
+                type: "json",
+                schema: outputStrategy.jsonSchema,
+                name: schemaName,
+                description: schemaDescription
+              },
+              ...prepareCallSettings(settings),
+              inputFormat: standardizedPrompt.type,
+              prompt: promptMessages,
+              providerOptions,
+              abortSignal,
+              headers
+            });
+            const responseData = {
+              id: (_b = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b : generateId3(),
+              timestamp: (_d = (_c = result2.response) == null ? void 0 : _c.timestamp) != null ? _d : currentDate(),
+              modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+              headers: (_g = result2.response) == null ? void 0 : _g.headers,
+              body: (_h = result2.response) == null ? void 0 : _h.body
+            };
+            const text2 = extractContentText(result2.content);
+            if (text2 === void 0) {
+              throw new NoObjectGeneratedError({
+                message: "No object generated: the model did not return a response.",
+                response: responseData,
+                usage: calculateLanguageModelUsage2(result2.usage),
+                finishReason: result2.finishReason
+              });
+            }
+            span2.setAttributes(
+              selectTelemetryAttributes({
                 telemetry,
                 attributes: {
-                  ...assembleOperationName({
-                    operationId: "ai.generateObject.doGenerate",
-                    telemetry
-                  }),
-                  ...baseTelemetryAttributes,
-                  "ai.prompt.format": {
-                    input: () => inputFormat
-                  },
-                  "ai.prompt.messages": {
-                    input: () => JSON.stringify(promptMessages)
-                  },
-                  "ai.settings.mode": mode,
+                  "ai.response.finishReason": result2.finishReason,
+                  "ai.response.object": { output: () => text2 },
+                  "ai.response.id": responseData.id,
+                  "ai.response.model": responseData.modelId,
+                  "ai.response.timestamp": responseData.timestamp.toISOString(),
+                  // TODO rename telemetry attributes to inputTokens and outputTokens
+                  "ai.usage.promptTokens": result2.usage.inputTokens,
+                  "ai.usage.completionTokens": result2.usage.outputTokens,
                   // standardized gen-ai llm span attributes:
-                  "gen_ai.system": model.provider,
-                  "gen_ai.request.model": model.modelId,
-                  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
-                  "gen_ai.request.max_tokens": settings.maxOutputTokens,
-                  "gen_ai.request.presence_penalty": settings.presencePenalty,
-                  "gen_ai.request.temperature": settings.temperature,
-                  "gen_ai.request.top_k": settings.topK,
-                  "gen_ai.request.top_p": settings.topP
+                  "gen_ai.response.finish_reasons": [result2.finishReason],
+                  "gen_ai.response.id": responseData.id,
+                  "gen_ai.response.model": responseData.modelId,
+                  "gen_ai.usage.input_tokens": result2.usage.inputTokens,
+                  "gen_ai.usage.output_tokens": result2.usage.outputTokens
                 }
-              }),
-              tracer,
-              fn: async (span2) => {
-                var _a18, _b2, _c2, _d2, _e, _f, _g, _h;
-                const result2 = await model.doGenerate({
-                  tools: [
-                    {
-                      type: "function",
-                      name: schemaName != null ? schemaName : "json",
-                      description: schemaDescription != null ? schemaDescription : "Respond with a JSON object.",
-                      parameters: outputStrategy.jsonSchema
-                    }
-                  ],
-                  toolChoice: { type: "required" },
-                  ...prepareCallSettings(settings),
-                  inputFormat,
-                  prompt: promptMessages,
-                  providerOptions,
-                  abortSignal,
-                  headers
-                });
-                const firstToolCall = result2.content.find(
-                  (content) => content.type === "tool-call"
-                );
-                const objectText = firstToolCall == null ? void 0 : firstToolCall.args;
-                const responseData = {
-                  id: (_b2 = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b2 : generateId3(),
-                  timestamp: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-                  modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
-                  headers: (_g = result2.response) == null ? void 0 : _g.headers,
-                  body: (_h = result2.response) == null ? void 0 : _h.body
-                };
-                if (objectText === void 0) {
-                  throw new NoObjectGeneratedError({
-                    message: "No object generated: the tool was not called.",
-                    response: responseData,
-                    usage: calculateLanguageModelUsage2(result2.usage),
-                    finishReason: result2.finishReason
-                  });
-                }
-                span2.setAttributes(
-                  selectTelemetryAttributes({
-                    telemetry,
-                    attributes: {
-                      "ai.response.finishReason": result2.finishReason,
-                      "ai.response.object": { output: () => objectText },
-                      "ai.response.id": responseData.id,
-                      "ai.response.model": responseData.modelId,
-                      "ai.response.timestamp": responseData.timestamp.toISOString(),
-                      // TODO rename telemetry attributes to inputTokens and outputTokens
-                      "ai.usage.promptTokens": result2.usage.inputTokens,
-                      "ai.usage.completionTokens": result2.usage.outputTokens,
-                      // standardized gen-ai llm span attributes:
-                      "gen_ai.response.finish_reasons": [result2.finishReason],
-                      "gen_ai.response.id": responseData.id,
-                      "gen_ai.response.model": responseData.modelId,
-                      "gen_ai.usage.input_tokens": result2.usage.inputTokens,
-                      "gen_ai.usage.output_tokens": result2.usage.outputTokens
-                    }
-                  })
-                );
-                return { ...result2, objectText, responseData };
-              }
-            })
-          );
-          result = generateResult.objectText;
-          finishReason = generateResult.finishReason;
-          usage = generateResult.usage;
-          warnings = generateResult.warnings;
-          logprobs = generateResult.logprobs;
-          resultProviderMetadata = generateResult.providerMetadata;
-          request = (_d = generateResult.request) != null ? _d : {};
-          response = generateResult.responseData;
-          break;
-        }
-        case void 0: {
-          throw new Error(
-            "Model does not have a default object generation mode."
-          );
-        }
-        default: {
-          const _exhaustiveCheck = mode;
-          throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
-        }
-      }
+              })
+            );
+            return { ...result2, objectText: text2, responseData };
+          }
+        })
+      );
+      result = generateResult.objectText;
+      finishReason = generateResult.finishReason;
+      usage = generateResult.usage;
+      warnings = generateResult.warnings;
+      logprobs = generateResult.logprobs;
+      resultProviderMetadata = generateResult.providerMetadata;
+      request = (_a17 = generateResult.request) != null ? _a17 : {};
+      response = generateResult.responseData;
      function processResult(result2) {
-        const parseResult = (0, import_provider_utils12.safeParseJSON)({ text: result2 });
+        const parseResult = (0, import_provider_utils13.safeParseJSON)({ text: result2 });
        if (!parseResult.success) {
          throw new NoObjectGeneratedError({
            message: "No object generated: could not parse the response.",
@@ -4644,7 +4477,7 @@ var DefaultGenerateObjectResult = class {
 };

 // core/generate-object/stream-object.ts
-var import_provider_utils13 = require("@ai-sdk/provider-utils");
+var import_provider_utils14 = require("@ai-sdk/provider-utils");

 // util/delayed-promise.ts
 var DelayedPromise = class {
@@ -4788,13 +4621,12 @@ function now() {
 }

 // core/generate-object/stream-object.ts
-var originalGenerateId2 = (0, import_provider_utils13.createIdGenerator)({ prefix: "aiobj", size: 24 });
+var originalGenerateId2 = (0, import_provider_utils14.createIdGenerator)({ prefix: "aiobj", size: 24 });
 function streamObject({
   model,
   schema: inputSchema,
   schemaName,
   schemaDescription,
-  mode,
   output = "object",
   system,
   prompt,
@@ -4815,15 +4647,11 @@ function streamObject({
 }) {
   validateObjectGenerationInput({
     output,
-    mode,
     schema: inputSchema,
     schemaName,
     schemaDescription
   });
   const outputStrategy = getOutputStrategy({ output, schema: inputSchema });
-  if (outputStrategy.type === "no-schema" && mode === void 0) {
-    mode = "json";
-  }
   return new DefaultStreamObjectResult({
     model,
     telemetry,
@@ -4838,7 +4666,6 @@ function streamObject({
     schemaName,
     schemaDescription,
     providerOptions,
-    mode,
     onError,
     onFinish,
     generateId: generateId3,
@@ -4861,7 +4688,6 @@ var DefaultStreamObjectResult = class {
     schemaName,
     schemaDescription,
     providerOptions,
-    mode,
     onError,
     onFinish,
     generateId: generateId3,
@@ -4877,11 +4703,12 @@ var DefaultStreamObjectResult = class {
     const { maxRetries, retry } = prepareRetries({
       maxRetries: maxRetriesArg
     });
+    const callSettings = prepareCallSettings(settings);
     const baseTelemetryAttributes = getBaseTelemetryAttributes({
       model,
       telemetry,
       headers,
-      settings: { ...settings, maxRetries }
+      settings: { ...callSettings, maxRetries }
     });
     const tracer = getTracer(telemetry);
     const self = this;
@@ -4912,120 +4739,47 @@ var DefaultStreamObjectResult = class {
        "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
        "ai.schema.name": schemaName,
        "ai.schema.description": schemaDescription,
-        "ai.settings.output": outputStrategy.type,
-        "ai.settings.mode": mode
+        "ai.settings.output": outputStrategy.type
      }
    }),
    tracer,
    endWhenDone: false,
    fn: async (rootSpan) => {
-      var _a17, _b;
-      if (mode === "auto" || mode == null) {
-        mode = model.defaultObjectGenerationMode;
-      }
-      let callOptions;
-      let transformer;
-      switch (mode) {
-        case "json": {
-          const standardizedPrompt = standardizePrompt({
-            prompt: {
-              system: outputStrategy.jsonSchema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs ? system : injectJsonInstruction({
-                prompt: system,
-                schema: outputStrategy.jsonSchema
-              }),
-              prompt,
-              messages
-            },
-            tools: void 0
-          });
-          callOptions = {
-            responseFormat: {
-              type: "json",
-              schema: outputStrategy.jsonSchema,
-              name: schemaName,
-              description: schemaDescription
-            },
-            ...prepareCallSettings(settings),
-            inputFormat: standardizedPrompt.type,
-            prompt: await convertToLanguageModelPrompt({
-              prompt: standardizedPrompt,
-              modelSupportsImageUrls: model.supportsImageUrls,
-              modelSupportsUrl: (_a17 = model.supportsUrl) == null ? void 0 : _a17.bind(model)
-              // support 'this' context
-            }),
-            providerOptions,
-            abortSignal,
-            headers
-          };
-          transformer = {
-            transform: (chunk, controller) => {
-              switch (chunk.type) {
-                case "text":
-                  controller.enqueue(chunk.text);
-                  break;
-                case "response-metadata":
-                case "finish":
-                case "error":
-                  controller.enqueue(chunk);
-                  break;
-              }
-            }
-          };
-          break;
-        }
-        case "tool": {
-          const standardizedPrompt = standardizePrompt({
-            prompt: { system, prompt, messages },
-            tools: void 0
-          });
-          callOptions = {
-            tools: [
-              {
-                type: "function",
-                name: schemaName != null ? schemaName : "json",
-                description: schemaDescription != null ? schemaDescription : "Respond with a JSON object.",
-                parameters: outputStrategy.jsonSchema
-              }
-            ],
-            toolChoice: { type: "required" },
-            ...prepareCallSettings(settings),
-            inputFormat: standardizedPrompt.type,
-            prompt: await convertToLanguageModelPrompt({
-              prompt: standardizedPrompt,
-              modelSupportsImageUrls: model.supportsImageUrls,
-              modelSupportsUrl: (_b = model.supportsUrl) == null ? void 0 : _b.bind(model)
-              // support 'this' context,
-            }),
-            providerOptions,
-            abortSignal,
-            headers
-          };
-          transformer = {
-            transform(chunk, controller) {
-              switch (chunk.type) {
-                case "tool-call-delta":
-                  controller.enqueue(chunk.argsTextDelta);
-                  break;
-                case "response-metadata":
-                case "finish":
-                case "error":
-                  controller.enqueue(chunk);
-                  break;
-              }
-            }
-          };
-          break;
-        }
-        case void 0: {
-          throw new Error(
-            "Model does not have a default object generation mode."
-          );
-        }
-        default: {
-          const _exhaustiveCheck = mode;
-          throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
+      const standardizedPrompt = standardizePrompt({
+        prompt: { system, prompt, messages },
+        tools: void 0
+      });
+      const callOptions = {
+        responseFormat: {
+          type: "json",
+          schema: outputStrategy.jsonSchema,
+          name: schemaName,
+          description: schemaDescription
+        },
+        ...prepareCallSettings(settings),
+        inputFormat: standardizedPrompt.type,
+        prompt: await convertToLanguageModelPrompt({
+          prompt: standardizedPrompt,
+          supportedUrls: await model.getSupportedUrls()
+        }),
+        providerOptions,
+        abortSignal,
+        headers
+      };
+      const transformer = {
+        transform: (chunk, controller) => {
+          switch (chunk.type) {
+            case "text":
+              controller.enqueue(chunk.text);
+              break;
+            case "response-metadata":
+            case "finish":
+            case "error":
+              controller.enqueue(chunk);
+              break;
+          }
        }
-      }
+      };
      const {
        result: { stream, response, request },
        doStreamSpan,
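
Note: streamObject keeps only the former json-mode transformer (text chunks are forwarded as partial JSON; the tool-call-delta handling disappears with the tool path), and callOptions/transformer become const bindings instead of let-plus-switch. Caller-side, the change mirrors generateObject (a sketch):

// const { partialObjectStream } = streamObject({ model, schema, prompt });
// // no `mode` option anymore; the schema always rides in responseFormat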
@@ -5047,16 +4801,15 @@ var DefaultStreamObjectResult = class {
            "ai.prompt.messages": {
              input: () => JSON.stringify(callOptions.prompt)
            },
-            "ai.settings.mode": mode,
            // standardized gen-ai llm span attributes:
            "gen_ai.system": model.provider,
            "gen_ai.request.model": model.modelId,
-            "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
-            "gen_ai.request.max_tokens": settings.maxOutputTokens,
-            "gen_ai.request.presence_penalty": settings.presencePenalty,
-            "gen_ai.request.temperature": settings.temperature,
-            "gen_ai.request.top_k": settings.topK,
-            "gen_ai.request.top_p": settings.topP
+            "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
+            "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
+            "gen_ai.request.presence_penalty": callSettings.presencePenalty,
+            "gen_ai.request.temperature": callSettings.temperature,
+            "gen_ai.request.top_k": callSettings.topK,
+            "gen_ai.request.top_p": callSettings.topP
          }
        }),
        tracer,
@@ -5089,7 +4842,7 @@ var DefaultStreamObjectResult = class {
      const transformedStream = stream.pipeThrough(new TransformStream(transformer)).pipeThrough(
        new TransformStream({
          async transform(chunk, controller) {
-            var _a18, _b2, _c;
+            var _a17, _b, _c;
            if (typeof chunk === "object" && chunk.type === "stream-start") {
              warnings = chunk.warnings;
              return;
@@ -5139,8 +4892,8 @@ var DefaultStreamObjectResult = class {
            switch (chunk.type) {
              case "response-metadata": {
                fullResponse = {
-                  id: (_a18 = chunk.id) != null ? _a18 : fullResponse.id,
-                  timestamp: (_b2 = chunk.timestamp) != null ? _b2 : fullResponse.timestamp,
+                  id: (_a17 = chunk.id) != null ? _a17 : fullResponse.id,
+                  timestamp: (_b = chunk.timestamp) != null ? _b : fullResponse.timestamp,
                  modelId: (_c = chunk.modelId) != null ? _c : fullResponse.modelId
                };
                break;
@@ -5364,7 +5117,7 @@ var DefaultStreamObjectResult = class {
 };

 // core/generate-text/generate-text.ts
-var import_provider_utils15 = require("@ai-sdk/provider-utils");
+var import_provider_utils16 = require("@ai-sdk/provider-utils");

 // errors/no-output-specified-error.ts
 var import_provider14 = require("@ai-sdk/provider");
@@ -5473,7 +5226,7 @@ function removeTextAfterLastWhitespace(text2) {
 }

 // core/generate-text/parse-tool-call.ts
-var import_provider_utils14 = require("@ai-sdk/provider-utils");
+var import_provider_utils15 = require("@ai-sdk/provider-utils");

 // errors/invalid-tool-arguments-error.ts
 var import_provider16 = require("@ai-sdk/provider");
@@ -5601,7 +5354,7 @@ async function doParseToolCall({
    });
  }
  const schema = asSchema(tool2.parameters);
-  const parseResult = toolCall.args.trim() === "" ? (0, import_provider_utils14.safeValidateTypes)({ value: {}, schema }) : (0, import_provider_utils14.safeParseJSON)({ text: toolCall.args, schema });
+  const parseResult = toolCall.args.trim() === "" ? (0, import_provider_utils15.safeValidateTypes)({ value: {}, schema }) : (0, import_provider_utils15.safeParseJSON)({ text: toolCall.args, schema });
  if (parseResult.success === false) {
    throw new InvalidToolArgumentsError({
      toolName,
@@ -5692,11 +5445,11 @@ function toResponseMessages({
 }

 // core/generate-text/generate-text.ts
-var originalGenerateId3 = (0, import_provider_utils15.createIdGenerator)({
+var originalGenerateId3 = (0, import_provider_utils16.createIdGenerator)({
   prefix: "aitxt",
   size: 24
 });
-var originalGenerateMessageId = (0, import_provider_utils15.createIdGenerator)({
+var originalGenerateMessageId = (0, import_provider_utils16.createIdGenerator)({
   prefix: "msg",
   size: 24
 });
@@ -5725,7 +5478,6 @@ async function generateText({
   onStepFinish,
   ...settings
 }) {
-  var _a17;
   if (maxSteps < 1) {
     throw new InvalidArgumentError({
       parameter: "maxSteps",
@@ -5734,18 +5486,15 @@ async function generateText({
     });
   }
   const { maxRetries, retry } = prepareRetries({ maxRetries: maxRetriesArg });
+  const callSettings = prepareCallSettings(settings);
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
     model,
     telemetry,
     headers,
-    settings: { ...settings, maxRetries }
+    settings: { ...callSettings, maxRetries }
   });
   const initialPrompt = standardizePrompt({
-    prompt: {
-      system: (_a17 = output == null ? void 0 : output.injectIntoSystemPrompt({ system, model })) != null ? _a17 : system,
-      prompt,
-      messages
-    },
+    prompt: { system, prompt, messages },
     tools
   });
   const tracer = getTracer(telemetry);
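
Note: in generateText (as in generateObject and streamObject above), prepareCallSettings now runs once up front and the result is reused, so spans report the normalized settings rather than the raw user input; output.injectIntoSystemPrompt also drops out of prompt standardization, matching its removal from the Output helpers later in this diff. Effectively:

// const callSettings = prepareCallSettings(settings);   // normalize once
// getBaseTelemetryAttributes({ settings: { ...callSettings, maxRetries }, ... });
// await model.doGenerate({ ...callSettings, ... });     // same object reused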
@@ -5768,11 +5517,10 @@ async function generateText({
    }),
    tracer,
    fn: async (span) => {
-      var _a18, _b, _c, _d;
+      var _a17, _b, _c;
      const toolsAndToolChoice = {
        ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
      };
-      const callSettings = prepareCallSettings(settings);
      let currentModelResponse;
      let currentToolCalls = [];
      let currentToolResults = [];
@@ -5800,99 +5548,100 @@ async function generateText({
          system: initialPrompt.system,
          messages: stepInputMessages
        },
-        modelSupportsImageUrls: model.supportsImageUrls,
-        modelSupportsUrl: (_a18 = model.supportsUrl) == null ? void 0 : _a18.bind(model)
-        // support 'this' context
+        supportedUrls: await model.getSupportedUrls()
      });
      currentModelResponse = await retry(
-        () => recordSpan({
-          name: "ai.generateText.doGenerate",
-          attributes: selectTelemetryAttributes({
-            telemetry,
-            attributes: {
-              ...assembleOperationName({
-                operationId: "ai.generateText.doGenerate",
-                telemetry
-              }),
-              ...baseTelemetryAttributes,
-              "ai.prompt.format": { input: () => promptFormat },
-              "ai.prompt.messages": {
-                input: () => JSON.stringify(promptMessages)
-              },
-              "ai.prompt.tools": {
-                // convert the language model level tools:
-                input: () => {
-                  var _a19;
-                  return (_a19 = toolsAndToolChoice.tools) == null ? void 0 : _a19.map((tool2) => JSON.stringify(tool2));
-                }
-              },
-              "ai.prompt.toolChoice": {
-                input: () => toolsAndToolChoice.toolChoice != null ? JSON.stringify(toolsAndToolChoice.toolChoice) : void 0
-              },
-              // standardized gen-ai llm span attributes:
-              "gen_ai.system": model.provider,
-              "gen_ai.request.model": model.modelId,
-              "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
-              "gen_ai.request.max_tokens": settings.maxOutputTokens,
-              "gen_ai.request.presence_penalty": settings.presencePenalty,
-              "gen_ai.request.stop_sequences": settings.stopSequences,
-              "gen_ai.request.temperature": settings.temperature,
-              "gen_ai.request.top_k": settings.topK,
-              "gen_ai.request.top_p": settings.topP
+        () => {
+          var _a18;
+          return recordSpan({
+            name: "ai.generateText.doGenerate",
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...assembleOperationName({
+                  operationId: "ai.generateText.doGenerate",
+                  telemetry
+                }),
+                ...baseTelemetryAttributes,
+                "ai.prompt.format": { input: () => promptFormat },
+                "ai.prompt.messages": {
+                  input: () => JSON.stringify(promptMessages)
+                },
+                "ai.prompt.tools": {
+                  // convert the language model level tools:
+                  input: () => {
+                    var _a19;
+                    return (_a19 = toolsAndToolChoice.tools) == null ? void 0 : _a19.map((tool2) => JSON.stringify(tool2));
+                  }
+                },
+                "ai.prompt.toolChoice": {
+                  input: () => toolsAndToolChoice.toolChoice != null ? JSON.stringify(toolsAndToolChoice.toolChoice) : void 0
+                },
+                // standardized gen-ai llm span attributes:
+                "gen_ai.system": model.provider,
+                "gen_ai.request.model": model.modelId,
+                "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
+                "gen_ai.request.max_tokens": settings.maxOutputTokens,
+                "gen_ai.request.presence_penalty": settings.presencePenalty,
+                "gen_ai.request.stop_sequences": settings.stopSequences,
+                "gen_ai.request.temperature": (_a18 = settings.temperature) != null ? _a18 : void 0,
+                "gen_ai.request.top_k": settings.topK,
+                "gen_ai.request.top_p": settings.topP
+              }
+            }),
+            tracer,
+            fn: async (span2) => {
+              var _a19, _b2, _c2, _d, _e, _f, _g, _h;
+              const result = await model.doGenerate({
+                ...callSettings,
+                ...toolsAndToolChoice,
+                inputFormat: promptFormat,
+                responseFormat: output == null ? void 0 : output.responseFormat,
+                prompt: promptMessages,
+                providerOptions,
+                abortSignal,
+                headers
+              });
+              const responseData = {
+                id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
+                timestamp: (_d = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d : currentDate(),
+                modelId: (_f = (_e = result.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+                headers: (_g = result.response) == null ? void 0 : _g.headers,
+                body: (_h = result.response) == null ? void 0 : _h.body
+              };
+              span2.setAttributes(
+                selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    "ai.response.finishReason": result.finishReason,
+                    "ai.response.text": {
+                      output: () => extractContentText(result.content)
+                    },
+                    "ai.response.toolCalls": {
+                      output: () => {
+                        const toolCalls = asToolCalls(result.content);
+                        return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
+                      }
+                    },
+                    "ai.response.id": responseData.id,
+                    "ai.response.model": responseData.modelId,
+                    "ai.response.timestamp": responseData.timestamp.toISOString(),
+                    // TODO rename telemetry attributes to inputTokens and outputTokens
+                    "ai.usage.promptTokens": result.usage.inputTokens,
+                    "ai.usage.completionTokens": result.usage.outputTokens,
+                    // standardized gen-ai llm span attributes:
+                    "gen_ai.response.finish_reasons": [result.finishReason],
+                    "gen_ai.response.id": responseData.id,
+                    "gen_ai.response.model": responseData.modelId,
+                    "gen_ai.usage.input_tokens": result.usage.inputTokens,
+                    "gen_ai.usage.output_tokens": result.usage.outputTokens
+                  }
+                })
+              );
+              return { ...result, response: responseData };
            }
-          }),
-          tracer,
-          fn: async (span2) => {
-            var _a19, _b2, _c2, _d2, _e, _f, _g, _h;
-            const result = await model.doGenerate({
-              ...callSettings,
-              ...toolsAndToolChoice,
-              inputFormat: promptFormat,
-              responseFormat: output == null ? void 0 : output.responseFormat({ model }),
-              prompt: promptMessages,
-              providerOptions,
-              abortSignal,
-              headers
-            });
-            const responseData = {
-              id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
-              timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-              modelId: (_f = (_e = result.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
-              headers: (_g = result.response) == null ? void 0 : _g.headers,
-              body: (_h = result.response) == null ? void 0 : _h.body
-            };
-            span2.setAttributes(
-              selectTelemetryAttributes({
-                telemetry,
-                attributes: {
-                  "ai.response.finishReason": result.finishReason,
-                  "ai.response.text": {
-                    output: () => extractContentText(result.content)
-                  },
-                  "ai.response.toolCalls": {
-                    output: () => {
-                      const toolCalls = asToolCalls(result.content);
-                      return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
-                    }
-                  },
-                  "ai.response.id": responseData.id,
-                  "ai.response.model": responseData.modelId,
-                  "ai.response.timestamp": responseData.timestamp.toISOString(),
-                  // TODO rename telemetry attributes to inputTokens and outputTokens
-                  "ai.usage.promptTokens": result.usage.inputTokens,
-                  "ai.usage.completionTokens": result.usage.outputTokens,
-                  // standardized gen-ai llm span attributes:
-                  "gen_ai.response.finish_reasons": [result.finishReason],
-                  "gen_ai.response.id": responseData.id,
-                  "gen_ai.response.model": responseData.modelId,
-                  "gen_ai.usage.input_tokens": result.usage.inputTokens,
-                  "gen_ai.usage.output_tokens": result.usage.outputTokens
-                }
-              })
-            );
-            return { ...result, response: responseData };
-          }
-        })
+          });
+        }
      );
      currentToolCalls = await Promise.all(
        currentModelResponse.content.filter(
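
Note: the retry callback grows from a bare arrow (() => recordSpan({ ... })) into a block body solely so it can declare _a18, the temp behind the new "gen_ai.request.temperature": (_a18 = settings.temperature) != null ? _a18 : void 0 attribute - i.e. a null temperature is now reported as undefined in telemetry, consistent with the prepareCallSettings change earlier in this diff.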
@@ -5932,7 +5681,7 @@ async function generateText({
          nextStepType = "tool-result";
        }
      }
-      const originalText = (_b = extractContentText(currentModelResponse.content)) != null ? _b : "";
+      const originalText = (_a17 = extractContentText(currentModelResponse.content)) != null ? _a17 : "";
      const stepTextLeadingWhitespaceTrimmed = stepType === "continue" && // only for continue steps
      text2.trimEnd() !== text2 ? originalText.trimStart() : originalText;
      const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace(stepTextLeadingWhitespaceTrimmed) : stepTextLeadingWhitespaceTrimmed;
@@ -5984,7 +5733,7 @@ async function generateText({
        usage: currentUsage,
        warnings: currentModelResponse.warnings,
        logprobs: currentModelResponse.logprobs,
-        request: (_c = currentModelResponse.request) != null ? _c : {},
+        request: (_b = currentModelResponse.request) != null ? _b : {},
        response: {
          ...currentModelResponse.response,
          // deep clone msgs to avoid mutating past messages in multi-step:
@@ -6041,7 +5790,7 @@ async function generateText({
      finishReason: currentModelResponse.finishReason,
      usage,
      warnings: currentModelResponse.warnings,
-      request: (_d = currentModelResponse.request) != null ? _d : {},
+      request: (_c = currentModelResponse.request) != null ? _c : {},
      response: {
        ...currentModelResponse.response,
        messages: responseMessages
@@ -6203,7 +5952,7 @@ __export(output_exports, {
   object: () => object,
   text: () => text
 });
-var import_provider_utils16 = require("@ai-sdk/provider-utils");
+var import_provider_utils17 = require("@ai-sdk/provider-utils");

 // errors/index.ts
 var import_provider21 = require("@ai-sdk/provider");
@@ -6253,10 +6002,7 @@ _a15 = symbol15;
 // core/generate-text/output.ts
 var text = () => ({
   type: "text",
-  responseFormat: () => ({ type: "text" }),
-  injectIntoSystemPrompt({ system }) {
-    return system;
-  },
+  responseFormat: { type: "text" },
   parsePartial({ text: text2 }) {
     return { partial: text2 };
   },
@@ -6270,15 +6016,9 @@ var object = ({
  const schema = asSchema(inputSchema);
  return {
    type: "object",
-    responseFormat: ({ model }) => ({
+    responseFormat: {
      type: "json",
-      schema: model.supportsStructuredOutputs ? schema.jsonSchema : void 0
-    }),
-    injectIntoSystemPrompt({ system, model }) {
-      return model.supportsStructuredOutputs ? system : injectJsonInstruction({
-        prompt: system,
-        schema: schema.jsonSchema
-      });
+      schema: schema.jsonSchema
    },
    parsePartial({ text: text2 }) {
      const result = parsePartialJson(text2);
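
Note: the Output helpers lose their model-dependence: responseFormat is now a plain value ({ type: "text" } or { type: "json", schema }) instead of a ({ model }) => ... factory, and injectIntoSystemPrompt is deleted along with injectJsonInstruction. The schema is always forwarded, whether or not the model advertises structured-output support. A sketch of caller code that keeps working unchanged (the public option name is not visible in this excerpt; internally generateText destructures it as `output`):

// const result = await generateText({
//   model,
//   output: object({ schema: mySchema }), // hypothetical option name
//   prompt: "..."
// });
// // internally, doGenerate now receives responseFormat: { type: "json", schema: ... }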
@@ -6299,7 +6039,7 @@ var object = ({
6299
6039
  }
6300
6040
  },
6301
6041
  parseOutput({ text: text2 }, context) {
6302
- const parseResult = (0, import_provider_utils16.safeParseJSON)({ text: text2 });
6042
+ const parseResult = (0, import_provider_utils17.safeParseJSON)({ text: text2 });
6303
6043
  if (!parseResult.success) {
6304
6044
  throw new NoObjectGeneratedError({
6305
6045
  message: "No object generated: could not parse the response.",
@@ -6310,7 +6050,7 @@ var object = ({
6310
6050
  finishReason: context.finishReason
6311
6051
  });
6312
6052
  }
6313
- const validationResult = (0, import_provider_utils16.safeValidateTypes)({
6053
+ const validationResult = (0, import_provider_utils17.safeValidateTypes)({
6314
6054
  value: parseResult.value,
6315
6055
  schema
6316
6056
  });
@@ -6330,7 +6070,7 @@ var object = ({
6330
6070
  };
6331
6071
 
6332
6072
  // core/generate-text/smooth-stream.ts
6333
- var import_provider_utils17 = require("@ai-sdk/provider-utils");
6073
+ var import_provider_utils18 = require("@ai-sdk/provider-utils");
6334
6074
  var import_provider22 = require("@ai-sdk/provider");
6335
6075
  var CHUNKING_REGEXPS = {
6336
6076
  word: /\S+\s+/m,
@@ -6339,7 +6079,7 @@ var CHUNKING_REGEXPS = {
6339
6079
  function smoothStream({
6340
6080
  delayInMs = 10,
6341
6081
  chunking = "word",
6342
- _internal: { delay: delay2 = import_provider_utils17.delay } = {}
6082
+ _internal: { delay: delay2 = import_provider_utils18.delay } = {}
6343
6083
  } = {}) {
6344
6084
  let detectChunk;
6345
6085
  if (typeof chunking === "function") {
@@ -6400,7 +6140,7 @@ function smoothStream({
6400
6140
 
6401
6141
  // core/generate-text/stream-text.ts
6402
6142
  var import_provider23 = require("@ai-sdk/provider");
6403
- var import_provider_utils18 = require("@ai-sdk/provider-utils");
6143
+ var import_provider_utils19 = require("@ai-sdk/provider-utils");
6404
6144
 
6405
6145
  // util/as-array.ts
6406
6146
  function asArray(value) {
@@ -6717,11 +6457,11 @@ function runToolsTransformation({
6717
6457
  }
6718
6458
 
6719
6459
  // core/generate-text/stream-text.ts
6720
- var originalGenerateId4 = (0, import_provider_utils18.createIdGenerator)({
6460
+ var originalGenerateId4 = (0, import_provider_utils19.createIdGenerator)({
6721
6461
  prefix: "aitxt",
6722
6462
  size: 24
6723
6463
  });
6724
- var originalGenerateMessageId2 = (0, import_provider_utils18.createIdGenerator)({
6464
+ var originalGenerateMessageId2 = (0, import_provider_utils19.createIdGenerator)({
6725
6465
  prefix: "msg",
6726
6466
  size: 24
6727
6467
  });
@@ -6879,7 +6619,6 @@ var DefaultStreamTextResult = class {
6879
6619
  this.requestPromise = new DelayedPromise();
6880
6620
  this.responsePromise = new DelayedPromise();
6881
6621
  this.stepsPromise = new DelayedPromise();
6882
- var _a17;
6883
6622
  if (maxSteps < 1) {
6884
6623
  throw new InvalidArgumentError({
6885
6624
  parameter: "maxSteps",
@@ -7031,7 +6770,7 @@ var DefaultStreamTextResult = class {
7031
6770
  }
7032
6771
  },
7033
6772
  async flush(controller) {
7034
- var _a18;
6773
+ var _a17;
7035
6774
  try {
7036
6775
  if (recordedSteps.length === 0) {
7037
6776
  return;
@@ -7068,7 +6807,7 @@ var DefaultStreamTextResult = class {
7068
6807
  sources: lastStep.sources,
7069
6808
  toolCalls: lastStep.toolCalls,
7070
6809
  toolResults: lastStep.toolResults,
7071
- request: (_a18 = lastStep.request) != null ? _a18 : {},
6810
+ request: (_a17 = lastStep.request) != null ? _a17 : {},
7072
6811
  response: lastStep.response,
7073
6812
  warnings: lastStep.warnings,
7074
6813
  providerMetadata: lastStep.providerMetadata,
@@ -7082,8 +6821,8 @@ var DefaultStreamTextResult = class {
7082
6821
  "ai.response.text": { output: () => recordedFullText },
7083
6822
  "ai.response.toolCalls": {
7084
6823
  output: () => {
7085
- var _a19;
7086
- return ((_a19 = lastStep.toolCalls) == null ? void 0 : _a19.length) ? JSON.stringify(lastStep.toolCalls) : void 0;
6824
+ var _a18;
6825
+ return ((_a18 = lastStep.toolCalls) == null ? void 0 : _a18.length) ? JSON.stringify(lastStep.toolCalls) : void 0;
7087
6826
  }
7088
6827
  },
7089
6828
  "ai.usage.promptTokens": usage.promptTokens,
@@ -7117,18 +6856,15 @@ var DefaultStreamTextResult = class {
7117
6856
  maxRetries: maxRetriesArg
7118
6857
  });
7119
6858
  const tracer = getTracer(telemetry);
6859
+ const callSettings = prepareCallSettings(settings);
7120
6860
  const baseTelemetryAttributes = getBaseTelemetryAttributes({
7121
6861
  model,
7122
6862
  telemetry,
7123
6863
  headers,
7124
- settings: { ...settings, maxRetries }
6864
+ settings: { ...callSettings, maxRetries }
7125
6865
  });
7126
6866
  const initialPrompt = standardizePrompt({
7127
- prompt: {
7128
- system: (_a17 = output == null ? void 0 : output.injectIntoSystemPrompt({ system, model })) != null ? _a17 : system,
7129
- prompt,
7130
- messages
7131
- },
6867
+ prompt: { system, prompt, messages },
7132
6868
  tools
7133
6869
  });
7134
6870
  const self = this;
@@ -7159,7 +6895,6 @@ var DefaultStreamTextResult = class {
7159
6895
  hasLeadingWhitespace,
7160
6896
  messageId
7161
6897
  }) {
7162
- var _a18;
7163
6898
  const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
7164
6899
  const stepInputMessages = [
7165
6900
  ...initialPrompt.messages,
@@ -7171,9 +6906,7 @@ var DefaultStreamTextResult = class {
7171
6906
  system: initialPrompt.system,
7172
6907
  messages: stepInputMessages
7173
6908
  },
7174
- modelSupportsImageUrls: model.supportsImageUrls,
7175
- modelSupportsUrl: (_a18 = model.supportsUrl) == null ? void 0 : _a18.bind(model)
7176
- // support 'this' context
6909
+ supportedUrls: await model.getSupportedUrls()
7177
6910
  });
7178
6911
  const toolsAndToolChoice = {
7179
6912
  ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
@@ -7202,8 +6935,8 @@ var DefaultStreamTextResult = class {
  "ai.prompt.tools": {
  // convert the language model level tools:
  input: () => {
- var _a19;
- return (_a19 = toolsAndToolChoice.tools) == null ? void 0 : _a19.map(
+ var _a17;
+ return (_a17 = toolsAndToolChoice.tools) == null ? void 0 : _a17.map(
  (tool2) => JSON.stringify(tool2)
  );
  }
7214
6947
  // standardized gen-ai llm span attributes:
7215
6948
  "gen_ai.system": model.provider,
7216
6949
  "gen_ai.request.model": model.modelId,
7217
- "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
7218
- "gen_ai.request.max_tokens": settings.maxOutputTokens,
7219
- "gen_ai.request.presence_penalty": settings.presencePenalty,
7220
- "gen_ai.request.stop_sequences": settings.stopSequences,
7221
- "gen_ai.request.temperature": settings.temperature,
7222
- "gen_ai.request.top_k": settings.topK,
7223
- "gen_ai.request.top_p": settings.topP
6950
+ "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
6951
+ "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
6952
+ "gen_ai.request.presence_penalty": callSettings.presencePenalty,
6953
+ "gen_ai.request.stop_sequences": callSettings.stopSequences,
6954
+ "gen_ai.request.temperature": callSettings.temperature,
6955
+ "gen_ai.request.top_k": callSettings.topK,
6956
+ "gen_ai.request.top_p": callSettings.topP
7224
6957
  }
7225
6958
  }),
7226
6959
  tracer,
7227
6960
  endWhenDone: false,
7228
- fn: async (doStreamSpan2) => ({
7229
- startTimestampMs: now2(),
7230
- // get before the call
7231
- doStreamSpan: doStreamSpan2,
7232
- result: await model.doStream({
7233
- ...prepareCallSettings(settings),
7234
- ...toolsAndToolChoice,
7235
- inputFormat: promptFormat,
7236
- responseFormat: output == null ? void 0 : output.responseFormat({ model }),
7237
- prompt: promptMessages,
7238
- providerOptions,
7239
- abortSignal,
7240
- headers
7241
- })
7242
- })
6961
+ fn: async (doStreamSpan2) => {
6962
+ return {
6963
+ startTimestampMs: now2(),
6964
+ // get before the call
6965
+ doStreamSpan: doStreamSpan2,
6966
+ result: await model.doStream({
6967
+ ...callSettings,
6968
+ ...toolsAndToolChoice,
6969
+ inputFormat: promptFormat,
6970
+ responseFormat: output == null ? void 0 : output.responseFormat,
6971
+ prompt: promptMessages,
6972
+ providerOptions,
6973
+ abortSignal,
6974
+ headers
6975
+ })
6976
+ };
6977
+ }
7243
6978
  })
7244
6979
  );
7245
6980
  const transformedStream = runToolsTransformation({
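Note: everything assembled for model.doStream above — normalized call settings, provider options, abort signal, headers — maps onto public streamText options. A usage sketch; the model variable and the provider-options key are placeholders:

const { streamText } = require("ai");

const controller = new AbortController();

const result = streamText({
  model, // assumed to be defined elsewhere
  prompt: "Summarize the changelog.",
  temperature: 0.3, // normalized via prepareCallSettings into callSettings
  headers: { "x-request-id": "abc-123" }, // forwarded to doStream as-is
  providerOptions: { exampleProvider: { tier: "fast" } }, // placeholder key
  abortSignal: controller.signal, // lets callers cancel mid-stream
});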
@@ -7294,7 +7029,7 @@ var DefaultStreamTextResult = class {
  transformedStream.pipeThrough(
  new TransformStream({
  async transform(chunk, controller) {
- var _a19, _b, _c;
+ var _a17, _b, _c;
  if (chunk.type === "stream-start") {
  warnings = chunk.warnings;
  return;
@@ -7385,7 +7120,7 @@ var DefaultStreamTextResult = class {
  }
  case "response-metadata": {
  stepResponse = {
- id: (_a17 = chunk.id) != null ? _a17 : stepResponse.id,
+ id: (_a17 = chunk.id) != null ? _a17 : stepResponse.id,
  timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
  modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
  };
@@ -8121,17 +7856,32 @@ function defaultSettingsMiddleware({
  return {
  middlewareVersion: "v2",
  transformParams: async ({ params }) => {
- var _a17;
+ var _a17, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
  return {
  ...settings,
  ...params,
+ // map all values that are null to undefined
+ maxOutputTokens: settings.maxOutputTokens !== null ? (_a17 = params.maxOutputTokens) != null ? _a17 : settings.maxOutputTokens : void 0,
+ temperature: settings.temperature !== null ? (
+ // temperature: special case 0 or null
+ params.temperature === 0 || params.temperature == null ? (_b = settings.temperature) != null ? _b : params.temperature : params.temperature
+ ) : void 0,
+ stopSequences: settings.stopSequences !== null ? (_c = params.stopSequences) != null ? _c : settings.stopSequences : void 0,
+ topP: settings.topP !== null ? (_d = params.topP) != null ? _d : settings.topP : void 0,
+ topK: settings.topK !== null ? (_e = params.topK) != null ? _e : settings.topK : void 0,
+ presencePenalty: settings.presencePenalty !== null ? (_f = params.presencePenalty) != null ? _f : settings.presencePenalty : void 0,
+ frequencyPenalty: settings.frequencyPenalty !== null ? (_g = params.frequencyPenalty) != null ? _g : settings.frequencyPenalty : void 0,
+ responseFormat: settings.responseFormat !== null ? (_h = params.responseFormat) != null ? _h : settings.responseFormat : void 0,
+ seed: settings.seed !== null ? (_i = params.seed) != null ? _i : settings.seed : void 0,
+ tools: settings.tools !== null ? (_j = params.tools) != null ? _j : settings.tools : void 0,
+ toolChoice: settings.toolChoice !== null ? (_k = params.toolChoice) != null ? _k : settings.toolChoice : void 0,
+ // headers: deep merge
+ headers: mergeObjects(settings.headers, params.headers),
+ // provider options: deep merge
  providerOptions: mergeObjects(
  settings.providerOptions,
  params.providerOptions
- ),
- // special case for temperature 0
- // TODO remove when temperature defaults to undefined
- temperature: params.temperature === 0 || params.temperature == null ? (_a17 = settings.temperature) != null ? _a17 : 0 : params.temperature
+ )
  };
  }
  };
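Note: after this hunk each default has three states — a per-call value wins, an unset call falls back to the default, and a default of null force-clears the setting to undefined — while headers and providerOptions are deep-merged instead of overwritten. A usage sketch with wrapLanguageModel; the base model is a placeholder:

const { wrapLanguageModel, defaultSettingsMiddleware } = require("ai");

const modelWithDefaults = wrapLanguageModel({
  model: baseModel, // assumed to be defined elsewhere
  middleware: defaultSettingsMiddleware({
    settings: {
      temperature: 0.7, // fills in when a call omits temperature (or passes 0)
      maxOutputTokens: 1024, // fills in when a call omits maxOutputTokens
      topK: null, // null force-clears topK to undefined on every call
      headers: { "x-team": "docs" }, // deep-merged with per-call headers
    },
  }),
});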
@@ -8319,7 +8069,6 @@ var doWrap = ({
  modelId,
  providerId
  }) => {
- var _a17;
  async function doTransform({
  params,
  type
@@ -8330,10 +8079,10 @@ var doWrap = ({
  specificationVersion: "v2",
  provider: providerId != null ? providerId : model.provider,
  modelId: modelId != null ? modelId : model.modelId,
- defaultObjectGenerationMode: model.defaultObjectGenerationMode,
- supportsImageUrls: model.supportsImageUrls,
- supportsUrl: (_a17 = model.supportsUrl) == null ? void 0 : _a17.bind(model),
- supportsStructuredOutputs: model.supportsStructuredOutputs,
+ // TODO middleware should be able to modify the supported urls
+ async getSupportedUrls() {
+ return model.getSupportedUrls();
+ },
  async doGenerate(params) {
  const transformedParams = await doTransform({ params, type: "generate" });
  const doGenerate = async () => model.doGenerate(transformedParams);
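Note: the wrapped model now simply forwards getSupportedUrls() to the inner model (per the TODO, middleware cannot adjust it yet), while transformParams still sees every call. A sketch of a hypothetical logging middleware in the v2 shape used above:

// Hypothetical middleware: logs each call type before passing params through.
const loggingMiddleware = {
  middlewareVersion: "v2",
  transformParams: async ({ type, params }) => {
    console.log(`about to run ${type}`); // type is "generate" or "stream"
    return params; // unchanged here; a real middleware could rewrite these
  },
};

const wrapped = wrapLanguageModel({
  model: baseModel, // assumed to be defined elsewhere
  middleware: loggingMiddleware,
});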
@@ -8695,7 +8444,7 @@ function tool(tool2) {
  }

  // core/tool/mcp/mcp-sse-transport.ts
- var import_provider_utils19 = require("@ai-sdk/provider-utils");
+ var import_provider_utils20 = require("@ai-sdk/provider-utils");

  // core/tool/mcp/json-rpc-message.ts
  var import_zod9 = require("zod");
@@ -8866,7 +8615,7 @@ var SseMCPTransport = class {
  (_b = this.onerror) == null ? void 0 : _b.call(this, error);
  return reject(error);
  }
- const stream = response.body.pipeThrough(new TextDecoderStream()).pipeThrough((0, import_provider_utils19.createEventSourceParserStream)());
+ const stream = response.body.pipeThrough(new TextDecoderStream()).pipeThrough((0, import_provider_utils20.createEventSourceParserStream)());
  const reader = stream.getReader();
  const processEvents = async () => {
  var _a18, _b2, _c2;
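Note: the decode-then-parse chain on response.body is reusable outside the MCP transport. A sketch over a plain fetch; the URL is a placeholder, and it assumes a runtime like Node 18+ where ReadableStream is async-iterable and the parser emits objects carrying the SSE event/data fields:

const { createEventSourceParserStream } = require("@ai-sdk/provider-utils");

async function readSse() {
  const response = await fetch("https://example.com/events"); // placeholder
  const events = response.body
    .pipeThrough(new TextDecoderStream())
    .pipeThrough(createEventSourceParserStream());
  for await (const event of events) {
    console.log(event.event, event.data); // assumed parsed SSE fields
  }
}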
@@ -9271,7 +9020,7 @@ function cosineSimilarity(vector1, vector2) {
  }

  // core/util/simulate-readable-stream.ts
- var import_provider_utils20 = require("@ai-sdk/provider-utils");
+ var import_provider_utils21 = require("@ai-sdk/provider-utils");
  function simulateReadableStream({
  chunks,
  initialDelayInMs = 0,
@@ -9279,7 +9028,7 @@ function simulateReadableStream({
  _internal
  }) {
  var _a17;
- const delay2 = (_a17 = _internal == null ? void 0 : _internal.delay) != null ? _a17 : import_provider_utils20.delay;
+ const delay2 = (_a17 = _internal == null ? void 0 : _internal.delay) != null ? _a17 : import_provider_utils21.delay;
  let index = 0;
  return new ReadableStream({
  async pull(controller) {
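Note: usage of simulateReadableStream is unchanged by the import renumbering above. A sketch; chunkDelayInMs sits in the lines elided between the two hunks and is assumed from the published API:

const { simulateReadableStream } = require("ai");

// Emits "Hello, " after 100 ms, then "world!" 50 ms later.
const stream = simulateReadableStream({
  chunks: ["Hello, ", "world!"],
  initialDelayInMs: 100,
  chunkDelayInMs: 50, // assumption: per-chunk delay parameter not shown above
});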
@@ -9406,10 +9155,10 @@ __export(llamaindex_adapter_exports, {
  toDataStream: () => toDataStream2,
  toDataStreamResponse: () => toDataStreamResponse2
  });
- var import_provider_utils22 = require("@ai-sdk/provider-utils");
+ var import_provider_utils23 = require("@ai-sdk/provider-utils");
  function toDataStreamInternal2(stream, callbacks) {
  const trimStart = trimStartOfStream();
- return (0, import_provider_utils22.convertAsyncIteratorToReadableStream)(stream[Symbol.asyncIterator]()).pipeThrough(
+ return (0, import_provider_utils23.convertAsyncIteratorToReadableStream)(stream[Symbol.asyncIterator]()).pipeThrough(
  new TransformStream({
  async transform(message, controller) {
  controller.enqueue(trimStart(message.delta));
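Note: the adapter's first step — turning the engine's async iterator into a web ReadableStream — works on any async iterable. A standalone sketch mirroring the call above; the generator and its delta shape are illustrative:

const { convertAsyncIteratorToReadableStream } = require("@ai-sdk/provider-utils");

async function* fakeEngineStream() {
  yield { delta: "Hello" }; // shape mirrors the message.delta access above
  yield { delta: " world" };
}

const readable = convertAsyncIteratorToReadableStream(
  fakeEngineStream()[Symbol.asyncIterator]()
);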