ai 5.0.0-canary.10 → 5.0.0-canary.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -2601,6 +2601,17 @@ var NoObjectGeneratedError = class extends AISDKError4 {
  };
  _a4 = symbol4;
 
+ // core/generate-text/extract-content-text.ts
+ function extractContentText(content) {
+ const parts = content.filter(
+ (content2) => content2.type === "text"
+ );
+ if (parts.length === 0) {
+ return void 0;
+ }
+ return parts.map((content2) => content2.text).join("");
+ }
+
  // util/download-error.ts
  import { AISDKError as AISDKError5 } from "@ai-sdk/provider";
  var name5 = "AI_DownloadError";
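
Note: extractContentText is hoisted into its own module here; an identical definition is deleted from the generate-object section further down in this diff. A minimal sketch of its behavior, assuming content parts shaped like the SDK's language model content array:

    // Joins all text parts in order; returns undefined when there are none.
    extractContentText([
      { type: "text", text: "Hello, " },
      { type: "tool-call" }, // non-text parts are skipped
      { type: "text", text: "world" },
    ]); // => "Hello, world"
    extractContentText([{ type: "tool-call" }]); // => undefined
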
@@ -2798,17 +2809,16 @@ var InvalidMessageRoleError = class extends AISDKError8 {
  _a7 = symbol7;
 
  // core/prompt/convert-to-language-model-prompt.ts
+ import { isUrlSupported } from "@ai-sdk/provider-utils";
  async function convertToLanguageModelPrompt({
  prompt,
- modelSupportsImageUrls = true,
- modelSupportsUrl = () => false,
+ supportedUrls,
  downloadImplementation = download
  }) {
  const downloadedAssets = await downloadAssets(
  prompt.messages,
  downloadImplementation,
- modelSupportsImageUrls,
- modelSupportsUrl
+ supportedUrls
  );
  return [
  ...prompt.system != null ? [{ role: "system", content: prompt.system }] : [],
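
Note: convertToLanguageModelPrompt drops the modelSupportsImageUrls flag and the modelSupportsUrl callback in favor of a declarative supportedUrls value, checked per part via isUrlSupported (next hunk). A hedged sketch of the new call shape, assuming supportedUrls maps media-type patterns to URL regexps as the isUrlSupported usage below suggests:

    const messages = await convertToLanguageModelPrompt({
      prompt: standardizedPrompt,
      // assumed shape: media-type pattern -> regexps for URLs the model accepts natively
      supportedUrls: { "image/*": [/^https:\/\//] },
    });
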
@@ -2927,19 +2937,29 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
  }
  }
  }
- async function downloadAssets(messages, downloadImplementation, modelSupportsImageUrls, modelSupportsUrl) {
+ async function downloadAssets(messages, downloadImplementation, supportedUrls) {
  const urls = messages.filter((message) => message.role === "user").map((message) => message.content).filter(
  (content) => Array.isArray(content)
  ).flat().filter(
  (part) => part.type === "image" || part.type === "file"
- ).filter(
- (part) => !(part.type === "image" && modelSupportsImageUrls === true)
- ).map((part) => part.type === "image" ? part.image : part.data).map(
- (part) => (
- // support string urls:
- typeof part === "string" && (part.startsWith("http:") || part.startsWith("https:")) ? new URL(part) : part
- )
- ).filter((image) => image instanceof URL).filter((url) => !modelSupportsUrl(url));
+ ).map((part) => {
+ var _a17, _b;
+ const mediaType = (_b = (_a17 = part.mediaType) != null ? _a17 : part.mimeType) != null ? _b : part.type === "image" ? "image/*" : void 0;
+ let data = part.type === "image" ? part.image : part.data;
+ if (typeof data === "string") {
+ try {
+ data = new URL(data);
+ } catch (ignored) {
+ }
+ }
+ return { mediaType, data };
+ }).filter(
+ (part) => part.data instanceof URL && part.mediaType != null && !isUrlSupported({
+ url: part.data.toString(),
+ mediaType: part.mediaType,
+ supportedUrls
+ })
+ ).map((part) => part.data);
  const downloadedImages = await Promise.all(
  urls.map(async (url) => ({
  url,
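
Note: downloadAssets now carries each part's media type along (mediaType, falling back to the legacy mimeType field, then to "image/*" for image parts), parses string data into a URL when possible, and downloads only URLs the model cannot consume natively. A sketch of the predicate the filter relies on, with an assumed supportedUrls shape:

    // True when a regexp registered for the part's media type matches the URL;
    // such parts are left as URLs instead of being downloaded.
    isUrlSupported({
      url: "https://example.com/cat.png",
      mediaType: "image/png",
      supportedUrls: { "image/*": [/^https:\/\/example\.com\//] },
    });
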
@@ -3093,8 +3113,7 @@ function prepareCallSettings({
  }
  return {
  maxOutputTokens,
- // TODO v5 remove default 0 for temperature
- temperature: temperature != null ? temperature : 0,
+ temperature: temperature != null ? temperature : temperature === null ? void 0 : 0,
  topP,
  topK,
  presencePenalty,
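
Note: the nested ternary is deliberate. temperature != null keeps any real value; the remaining branch distinguishes an explicit null (now mapped to undefined, opting out of the legacy default) from undefined (still defaulted to 0, per the removed TODO):

    // temperature: 0.7       -> 0.7       (passed through)
    // temperature: null      -> undefined (explicit opt-out of the default)
    // temperature: undefined -> 0         (legacy default, slated for removal)
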
@@ -3713,26 +3732,6 @@ function addLanguageModelUsage(usage1, usage2) {
  };
  }
 
- // core/generate-object/inject-json-instruction.ts
- var DEFAULT_SCHEMA_PREFIX = "JSON schema:";
- var DEFAULT_SCHEMA_SUFFIX = "You MUST answer with a JSON object that matches the JSON schema above.";
- var DEFAULT_GENERIC_SUFFIX = "You MUST answer with JSON.";
- function injectJsonInstruction({
- prompt,
- schema,
- schemaPrefix = schema != null ? DEFAULT_SCHEMA_PREFIX : void 0,
- schemaSuffix = schema != null ? DEFAULT_SCHEMA_SUFFIX : DEFAULT_GENERIC_SUFFIX
- }) {
- return [
- prompt != null && prompt.length > 0 ? prompt : void 0,
- prompt != null && prompt.length > 0 ? "" : void 0,
- // add a newline if prompt is not null
- schemaPrefix,
- schema != null ? JSON.stringify(schema) : void 0,
- schemaSuffix
- ].filter((line) => line != null).join("\n");
- }
-
  // core/generate-object/output-strategy.ts
  import {
  isJSONArray,
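
Note: with injectJsonInstruction deleted, generateObject and streamObject no longer splice schema instructions into the system prompt; the schema travels exclusively through the responseFormat call option (the { type: "json", schema, name, description } objects later in this diff). For reference, the removed helper produced text like:

    // "<system prompt>\n\nJSON schema:\n{...}\nYou MUST answer with a JSON object that matches the JSON schema above."
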
@@ -3985,7 +3984,6 @@ function getOutputStrategy({
  // core/generate-object/validate-object-generation-input.ts
  function validateObjectGenerationInput({
  output,
- mode,
  schema,
  schemaName,
  schemaDescription,
@@ -3999,13 +3997,6 @@ function validateObjectGenerationInput({
  });
  }
  if (output === "no-schema") {
- if (mode === "auto" || mode === "tool") {
- throw new InvalidArgumentError({
- parameter: "mode",
- value: mode,
- message: 'Mode must be "json" for no-schema output.'
- });
- }
  if (schema != null) {
  throw new InvalidArgumentError({
  parameter: "schema",
@@ -4108,17 +4099,6 @@ function validateObjectGenerationInput({
  }
  }
 
- // core/generate-text/extract-content-text.ts
- function extractContentText(content) {
- const parts = content.filter(
- (content2) => content2.type === "text"
- );
- if (parts.length === 0) {
- return void 0;
- }
- return parts.map((content2) => content2.text).join("");
- }
-
  // core/generate-object/generate-object.ts
  var originalGenerateId = createIdGenerator({ prefix: "aiobj", size: 24 });
  async function generateObject({
@@ -4128,7 +4108,6 @@ async function generateObject({
  schema: inputSchema,
  schemaName,
  schemaDescription,
- mode,
  output = "object",
  system,
  prompt,
@@ -4147,7 +4126,6 @@ async function generateObject({
  }) {
  validateObjectGenerationInput({
  output,
- mode,
  schema: inputSchema,
  schemaName,
  schemaDescription,
@@ -4159,14 +4137,12 @@ async function generateObject({
  schema: inputSchema,
  enumValues
  });
- if (outputStrategy.type === "no-schema" && mode === void 0) {
- mode = "json";
- }
+ const callSettings = prepareCallSettings(settings);
  const baseTelemetryAttributes = getBaseTelemetryAttributes({
  model,
  telemetry,
  headers,
- settings: { ...settings, maxRetries }
+ settings: { ...callSettings, maxRetries }
  });
  const tracer = getTracer(telemetry);
  return recordSpan({
@@ -4186,16 +4162,12 @@ async function generateObject({
  "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
  "ai.schema.name": schemaName,
  "ai.schema.description": schemaDescription,
- "ai.settings.output": outputStrategy.type,
- "ai.settings.mode": mode
+ "ai.settings.output": outputStrategy.type
  }
  }),
  tracer,
  fn: async (span) => {
- var _a17, _b, _c, _d;
- if (mode === "auto" || mode == null) {
- mode = model.defaultObjectGenerationMode;
- }
+ var _a17;
  let result;
  let finishReason;
  let usage;
@@ -4204,247 +4176,108 @@ async function generateObject({
  let request;
  let logprobs;
  let resultProviderMetadata;
- switch (mode) {
- case "json": {
- const standardizedPrompt = standardizePrompt({
- prompt: {
- system: outputStrategy.jsonSchema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs ? system : injectJsonInstruction({
- prompt: system,
- schema: outputStrategy.jsonSchema
- }),
- prompt,
- messages
- },
- tools: void 0
- });
- const promptMessages = await convertToLanguageModelPrompt({
- prompt: standardizedPrompt,
- modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: (_a17 = model.supportsUrl) == null ? void 0 : _a17.bind(model)
- // support 'this' context
- });
- const generateResult = await retry(
- () => recordSpan({
- name: "ai.generateObject.doGenerate",
- attributes: selectTelemetryAttributes({
- telemetry,
- attributes: {
- ...assembleOperationName({
- operationId: "ai.generateObject.doGenerate",
- telemetry
- }),
- ...baseTelemetryAttributes,
- "ai.prompt.format": {
- input: () => standardizedPrompt.type
- },
- "ai.prompt.messages": {
- input: () => JSON.stringify(promptMessages)
- },
- "ai.settings.mode": mode,
- // standardized gen-ai llm span attributes:
- "gen_ai.system": model.provider,
- "gen_ai.request.model": model.modelId,
- "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
- "gen_ai.request.max_tokens": settings.maxOutputTokens,
- "gen_ai.request.presence_penalty": settings.presencePenalty,
- "gen_ai.request.temperature": settings.temperature,
- "gen_ai.request.top_k": settings.topK,
- "gen_ai.request.top_p": settings.topP
- }
+ const standardizedPrompt = standardizePrompt({
+ prompt: { system, prompt, messages },
+ tools: void 0
+ });
+ const promptMessages = await convertToLanguageModelPrompt({
+ prompt: standardizedPrompt,
+ supportedUrls: await model.getSupportedUrls()
+ });
+ const generateResult = await retry(
+ () => recordSpan({
+ name: "ai.generateObject.doGenerate",
+ attributes: selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ ...assembleOperationName({
+ operationId: "ai.generateObject.doGenerate",
+ telemetry
  }),
- tracer,
- fn: async (span2) => {
- var _a18, _b2, _c2, _d2, _e, _f, _g, _h;
- const result2 = await model.doGenerate({
- responseFormat: {
- type: "json",
- schema: outputStrategy.jsonSchema,
- name: schemaName,
- description: schemaDescription
- },
- ...prepareCallSettings(settings),
- inputFormat: standardizedPrompt.type,
- prompt: promptMessages,
- providerOptions,
- abortSignal,
- headers
- });
- const responseData = {
- id: (_b2 = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b2 : generateId3(),
- timestamp: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
- modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
- headers: (_g = result2.response) == null ? void 0 : _g.headers,
- body: (_h = result2.response) == null ? void 0 : _h.body
- };
- const text2 = extractContentText(result2.content);
- if (text2 === void 0) {
- throw new NoObjectGeneratedError({
- message: "No object generated: the model did not return a response.",
- response: responseData,
- usage: calculateLanguageModelUsage2(result2.usage),
- finishReason: result2.finishReason
- });
- }
- span2.setAttributes(
- selectTelemetryAttributes({
- telemetry,
- attributes: {
- "ai.response.finishReason": result2.finishReason,
- "ai.response.object": { output: () => text2 },
- "ai.response.id": responseData.id,
- "ai.response.model": responseData.modelId,
- "ai.response.timestamp": responseData.timestamp.toISOString(),
- // TODO rename telemetry attributes to inputTokens and outputTokens
- "ai.usage.promptTokens": result2.usage.inputTokens,
- "ai.usage.completionTokens": result2.usage.outputTokens,
- // standardized gen-ai llm span attributes:
- "gen_ai.response.finish_reasons": [result2.finishReason],
- "gen_ai.response.id": responseData.id,
- "gen_ai.response.model": responseData.modelId,
- "gen_ai.usage.input_tokens": result2.usage.inputTokens,
- "gen_ai.usage.output_tokens": result2.usage.outputTokens
- }
- })
- );
- return { ...result2, objectText: text2, responseData };
- }
- })
- );
- result = generateResult.objectText;
- finishReason = generateResult.finishReason;
- usage = generateResult.usage;
- warnings = generateResult.warnings;
- logprobs = generateResult.logprobs;
- resultProviderMetadata = generateResult.providerMetadata;
- request = (_b = generateResult.request) != null ? _b : {};
- response = generateResult.responseData;
- break;
- }
- case "tool": {
- const standardizedPrompt = standardizePrompt({
- prompt: { system, prompt, messages },
- tools: void 0
- });
- const promptMessages = await convertToLanguageModelPrompt({
- prompt: standardizedPrompt,
- modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: (_c = model.supportsUrl) == null ? void 0 : _c.bind(model)
- // support 'this' context,
- });
- const inputFormat = standardizedPrompt.type;
- const generateResult = await retry(
- () => recordSpan({
- name: "ai.generateObject.doGenerate",
- attributes: selectTelemetryAttributes({
+ ...baseTelemetryAttributes,
+ "ai.prompt.format": {
+ input: () => standardizedPrompt.type
+ },
+ "ai.prompt.messages": {
+ input: () => JSON.stringify(promptMessages)
+ },
+ // standardized gen-ai llm span attributes:
+ "gen_ai.system": model.provider,
+ "gen_ai.request.model": model.modelId,
+ "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
+ "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
+ "gen_ai.request.presence_penalty": callSettings.presencePenalty,
+ "gen_ai.request.temperature": callSettings.temperature,
+ "gen_ai.request.top_k": callSettings.topK,
+ "gen_ai.request.top_p": callSettings.topP
+ }
+ }),
+ tracer,
+ fn: async (span2) => {
+ var _a18, _b, _c, _d, _e, _f, _g, _h;
+ const result2 = await model.doGenerate({
+ responseFormat: {
+ type: "json",
+ schema: outputStrategy.jsonSchema,
+ name: schemaName,
+ description: schemaDescription
+ },
+ ...prepareCallSettings(settings),
+ inputFormat: standardizedPrompt.type,
+ prompt: promptMessages,
+ providerOptions,
+ abortSignal,
+ headers
+ });
+ const responseData = {
+ id: (_b = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b : generateId3(),
+ timestamp: (_d = (_c = result2.response) == null ? void 0 : _c.timestamp) != null ? _d : currentDate(),
+ modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+ headers: (_g = result2.response) == null ? void 0 : _g.headers,
+ body: (_h = result2.response) == null ? void 0 : _h.body
+ };
+ const text2 = extractContentText(result2.content);
+ if (text2 === void 0) {
+ throw new NoObjectGeneratedError({
+ message: "No object generated: the model did not return a response.",
+ response: responseData,
+ usage: calculateLanguageModelUsage2(result2.usage),
+ finishReason: result2.finishReason
+ });
+ }
+ span2.setAttributes(
+ selectTelemetryAttributes({
  telemetry,
  attributes: {
- ...assembleOperationName({
- operationId: "ai.generateObject.doGenerate",
- telemetry
- }),
- ...baseTelemetryAttributes,
- "ai.prompt.format": {
- input: () => inputFormat
- },
- "ai.prompt.messages": {
- input: () => JSON.stringify(promptMessages)
- },
- "ai.settings.mode": mode,
+ "ai.response.finishReason": result2.finishReason,
+ "ai.response.object": { output: () => text2 },
+ "ai.response.id": responseData.id,
+ "ai.response.model": responseData.modelId,
+ "ai.response.timestamp": responseData.timestamp.toISOString(),
+ // TODO rename telemetry attributes to inputTokens and outputTokens
+ "ai.usage.promptTokens": result2.usage.inputTokens,
+ "ai.usage.completionTokens": result2.usage.outputTokens,
  // standardized gen-ai llm span attributes:
- "gen_ai.system": model.provider,
- "gen_ai.request.model": model.modelId,
- "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
- "gen_ai.request.max_tokens": settings.maxOutputTokens,
- "gen_ai.request.presence_penalty": settings.presencePenalty,
- "gen_ai.request.temperature": settings.temperature,
- "gen_ai.request.top_k": settings.topK,
- "gen_ai.request.top_p": settings.topP
- }
- }),
- tracer,
- fn: async (span2) => {
- var _a18, _b2, _c2, _d2, _e, _f, _g, _h;
- const result2 = await model.doGenerate({
- tools: [
- {
- type: "function",
- name: schemaName != null ? schemaName : "json",
- description: schemaDescription != null ? schemaDescription : "Respond with a JSON object.",
- parameters: outputStrategy.jsonSchema
- }
- ],
- toolChoice: { type: "required" },
- ...prepareCallSettings(settings),
- inputFormat,
- prompt: promptMessages,
- providerOptions,
- abortSignal,
- headers
- });
- const firstToolCall = result2.content.find(
- (content) => content.type === "tool-call"
- );
- const objectText = firstToolCall == null ? void 0 : firstToolCall.args;
- const responseData = {
- id: (_b2 = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b2 : generateId3(),
- timestamp: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
- modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
- headers: (_g = result2.response) == null ? void 0 : _g.headers,
- body: (_h = result2.response) == null ? void 0 : _h.body
- };
- if (objectText === void 0) {
- throw new NoObjectGeneratedError({
- message: "No object generated: the tool was not called.",
- response: responseData,
- usage: calculateLanguageModelUsage2(result2.usage),
- finishReason: result2.finishReason
- });
+ "gen_ai.response.finish_reasons": [result2.finishReason],
+ "gen_ai.response.id": responseData.id,
+ "gen_ai.response.model": responseData.modelId,
+ "gen_ai.usage.input_tokens": result2.usage.inputTokens,
+ "gen_ai.usage.output_tokens": result2.usage.outputTokens
  }
- span2.setAttributes(
- selectTelemetryAttributes({
- telemetry,
- attributes: {
- "ai.response.finishReason": result2.finishReason,
- "ai.response.object": { output: () => objectText },
- "ai.response.id": responseData.id,
- "ai.response.model": responseData.modelId,
- "ai.response.timestamp": responseData.timestamp.toISOString(),
- // TODO rename telemetry attributes to inputTokens and outputTokens
- "ai.usage.promptTokens": result2.usage.inputTokens,
- "ai.usage.completionTokens": result2.usage.outputTokens,
- // standardized gen-ai llm span attributes:
- "gen_ai.response.finish_reasons": [result2.finishReason],
- "gen_ai.response.id": responseData.id,
- "gen_ai.response.model": responseData.modelId,
- "gen_ai.usage.input_tokens": result2.usage.inputTokens,
- "gen_ai.usage.output_tokens": result2.usage.outputTokens
- }
- })
- );
- return { ...result2, objectText, responseData };
- }
- })
- );
- result = generateResult.objectText;
- finishReason = generateResult.finishReason;
- usage = generateResult.usage;
- warnings = generateResult.warnings;
- logprobs = generateResult.logprobs;
- resultProviderMetadata = generateResult.providerMetadata;
- request = (_d = generateResult.request) != null ? _d : {};
- response = generateResult.responseData;
- break;
- }
- case void 0: {
- throw new Error(
- "Model does not have a default object generation mode."
- );
- }
- default: {
- const _exhaustiveCheck = mode;
- throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
- }
- }
+ })
+ );
+ return { ...result2, objectText: text2, responseData };
+ }
+ })
+ );
+ result = generateResult.objectText;
+ finishReason = generateResult.finishReason;
+ usage = generateResult.usage;
+ warnings = generateResult.warnings;
+ logprobs = generateResult.logprobs;
+ resultProviderMetadata = generateResult.providerMetadata;
+ request = (_a17 = generateResult.request) != null ? _a17 : {};
+ response = generateResult.responseData;
  function processResult(result2) {
  const parseResult = safeParseJSON2({ text: result2 });
  if (!parseResult.success) {
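
Note: this hunk collapses generateObject's mode switch ("json" vs. "tool", plus the undefined/default error branches) into a single path that always sends responseFormat: { type: "json", ... } and resolves downloadable URLs through model.getSupportedUrls(). For callers, roughly (a sketch against this canary API):

    // mode is gone; output and schema alone drive the request
    const { object } = await generateObject({
      model,
      schema, // forwarded as responseFormat: { type: "json", schema, name, description }
      prompt: "Generate a lasagna recipe.",
    });
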
@@ -4694,7 +4527,6 @@ function streamObject({
  schema: inputSchema,
  schemaName,
  schemaDescription,
- mode,
  output = "object",
  system,
  prompt,
@@ -4715,15 +4547,11 @@ function streamObject({
  }) {
  validateObjectGenerationInput({
  output,
- mode,
  schema: inputSchema,
  schemaName,
  schemaDescription
  });
  const outputStrategy = getOutputStrategy({ output, schema: inputSchema });
- if (outputStrategy.type === "no-schema" && mode === void 0) {
- mode = "json";
- }
  return new DefaultStreamObjectResult({
  model,
  telemetry,
@@ -4738,7 +4566,6 @@ function streamObject({
  schemaName,
  schemaDescription,
  providerOptions,
- mode,
  onError,
  onFinish,
  generateId: generateId3,
@@ -4761,7 +4588,6 @@ var DefaultStreamObjectResult = class {
  schemaName,
  schemaDescription,
  providerOptions,
- mode,
  onError,
  onFinish,
  generateId: generateId3,
@@ -4777,11 +4603,12 @@ var DefaultStreamObjectResult = class {
  const { maxRetries, retry } = prepareRetries({
  maxRetries: maxRetriesArg
  });
+ const callSettings = prepareCallSettings(settings);
  const baseTelemetryAttributes = getBaseTelemetryAttributes({
  model,
  telemetry,
  headers,
- settings: { ...settings, maxRetries }
+ settings: { ...callSettings, maxRetries }
  });
  const tracer = getTracer(telemetry);
  const self = this;
@@ -4812,120 +4639,47 @@ var DefaultStreamObjectResult = class {
  "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
  "ai.schema.name": schemaName,
  "ai.schema.description": schemaDescription,
- "ai.settings.output": outputStrategy.type,
- "ai.settings.mode": mode
+ "ai.settings.output": outputStrategy.type
  }
  }),
  tracer,
  endWhenDone: false,
  fn: async (rootSpan) => {
- var _a17, _b;
- if (mode === "auto" || mode == null) {
- mode = model.defaultObjectGenerationMode;
- }
- let callOptions;
- let transformer;
- switch (mode) {
- case "json": {
- const standardizedPrompt = standardizePrompt({
- prompt: {
- system: outputStrategy.jsonSchema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs ? system : injectJsonInstruction({
- prompt: system,
- schema: outputStrategy.jsonSchema
- }),
- prompt,
- messages
- },
- tools: void 0
- });
- callOptions = {
- responseFormat: {
- type: "json",
- schema: outputStrategy.jsonSchema,
- name: schemaName,
- description: schemaDescription
- },
- ...prepareCallSettings(settings),
- inputFormat: standardizedPrompt.type,
- prompt: await convertToLanguageModelPrompt({
- prompt: standardizedPrompt,
- modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: (_a17 = model.supportsUrl) == null ? void 0 : _a17.bind(model)
- // support 'this' context
- }),
- providerOptions,
- abortSignal,
- headers
- };
- transformer = {
- transform: (chunk, controller) => {
- switch (chunk.type) {
- case "text":
- controller.enqueue(chunk.text);
- break;
- case "response-metadata":
- case "finish":
- case "error":
- controller.enqueue(chunk);
- break;
- }
- }
- };
- break;
- }
- case "tool": {
- const standardizedPrompt = standardizePrompt({
- prompt: { system, prompt, messages },
- tools: void 0
- });
- callOptions = {
- tools: [
- {
- type: "function",
- name: schemaName != null ? schemaName : "json",
- description: schemaDescription != null ? schemaDescription : "Respond with a JSON object.",
- parameters: outputStrategy.jsonSchema
- }
- ],
- toolChoice: { type: "required" },
- ...prepareCallSettings(settings),
- inputFormat: standardizedPrompt.type,
- prompt: await convertToLanguageModelPrompt({
- prompt: standardizedPrompt,
- modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: (_b = model.supportsUrl) == null ? void 0 : _b.bind(model)
- // support 'this' context,
- }),
- providerOptions,
- abortSignal,
- headers
- };
- transformer = {
- transform(chunk, controller) {
- switch (chunk.type) {
- case "tool-call-delta":
- controller.enqueue(chunk.argsTextDelta);
- break;
- case "response-metadata":
- case "finish":
- case "error":
- controller.enqueue(chunk);
- break;
- }
- }
- };
- break;
- }
- case void 0: {
- throw new Error(
- "Model does not have a default object generation mode."
- );
- }
- default: {
- const _exhaustiveCheck = mode;
- throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
+ const standardizedPrompt = standardizePrompt({
+ prompt: { system, prompt, messages },
+ tools: void 0
+ });
+ const callOptions = {
+ responseFormat: {
+ type: "json",
+ schema: outputStrategy.jsonSchema,
+ name: schemaName,
+ description: schemaDescription
+ },
+ ...prepareCallSettings(settings),
+ inputFormat: standardizedPrompt.type,
+ prompt: await convertToLanguageModelPrompt({
+ prompt: standardizedPrompt,
+ supportedUrls: await model.getSupportedUrls()
+ }),
+ providerOptions,
+ abortSignal,
+ headers
+ };
+ const transformer = {
+ transform: (chunk, controller) => {
+ switch (chunk.type) {
+ case "text":
+ controller.enqueue(chunk.text);
+ break;
+ case "response-metadata":
+ case "finish":
+ case "error":
+ controller.enqueue(chunk);
+ break;
+ }
  }
- }
+ };
  const {
  result: { stream, response, request },
  doStreamSpan,
@@ -4947,16 +4701,15 @@ var DefaultStreamObjectResult = class {
  "ai.prompt.messages": {
  input: () => JSON.stringify(callOptions.prompt)
  },
- "ai.settings.mode": mode,
  // standardized gen-ai llm span attributes:
  "gen_ai.system": model.provider,
  "gen_ai.request.model": model.modelId,
- "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
- "gen_ai.request.max_tokens": settings.maxOutputTokens,
- "gen_ai.request.presence_penalty": settings.presencePenalty,
- "gen_ai.request.temperature": settings.temperature,
- "gen_ai.request.top_k": settings.topK,
- "gen_ai.request.top_p": settings.topP
+ "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
+ "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
+ "gen_ai.request.presence_penalty": callSettings.presencePenalty,
+ "gen_ai.request.temperature": callSettings.temperature,
+ "gen_ai.request.top_k": callSettings.topK,
+ "gen_ai.request.top_p": callSettings.topP
  }
  }),
  tracer,
@@ -4989,7 +4742,7 @@ var DefaultStreamObjectResult = class {
  const transformedStream = stream.pipeThrough(new TransformStream(transformer)).pipeThrough(
  new TransformStream({
  async transform(chunk, controller) {
- var _a18, _b2, _c;
+ var _a17, _b, _c;
  if (typeof chunk === "object" && chunk.type === "stream-start") {
  warnings = chunk.warnings;
  return;
@@ -5039,8 +4792,8 @@ var DefaultStreamObjectResult = class {
  switch (chunk.type) {
  case "response-metadata": {
  fullResponse = {
- id: (_a18 = chunk.id) != null ? _a18 : fullResponse.id,
- timestamp: (_b2 = chunk.timestamp) != null ? _b2 : fullResponse.timestamp,
+ id: (_a17 = chunk.id) != null ? _a17 : fullResponse.id,
+ timestamp: (_b = chunk.timestamp) != null ? _b : fullResponse.timestamp,
  modelId: (_c = chunk.modelId) != null ? _c : fullResponse.modelId
  };
  break;
@@ -5625,7 +5378,6 @@ async function generateText({
  onStepFinish,
  ...settings
  }) {
- var _a17;
  if (maxSteps < 1) {
  throw new InvalidArgumentError({
  parameter: "maxSteps",
@@ -5634,18 +5386,15 @@ async function generateText({
  });
  }
  const { maxRetries, retry } = prepareRetries({ maxRetries: maxRetriesArg });
+ const callSettings = prepareCallSettings(settings);
  const baseTelemetryAttributes = getBaseTelemetryAttributes({
  model,
  telemetry,
  headers,
- settings: { ...settings, maxRetries }
+ settings: { ...callSettings, maxRetries }
  });
  const initialPrompt = standardizePrompt({
- prompt: {
- system: (_a17 = output == null ? void 0 : output.injectIntoSystemPrompt({ system, model })) != null ? _a17 : system,
- prompt,
- messages
- },
+ prompt: { system, prompt, messages },
  tools
  });
  const tracer = getTracer(telemetry);
@@ -5668,11 +5417,10 @@ async function generateText({
  }),
  tracer,
  fn: async (span) => {
- var _a18, _b, _c, _d;
+ var _a17, _b, _c;
  const toolsAndToolChoice = {
  ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
  };
- const callSettings = prepareCallSettings(settings);
  let currentModelResponse;
  let currentToolCalls = [];
  let currentToolResults = [];
@@ -5700,99 +5448,100 @@ async function generateText({
  system: initialPrompt.system,
  messages: stepInputMessages
  },
- modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: (_a18 = model.supportsUrl) == null ? void 0 : _a18.bind(model)
- // support 'this' context
+ supportedUrls: await model.getSupportedUrls()
  });
  currentModelResponse = await retry(
- () => recordSpan({
- name: "ai.generateText.doGenerate",
- attributes: selectTelemetryAttributes({
- telemetry,
- attributes: {
- ...assembleOperationName({
- operationId: "ai.generateText.doGenerate",
- telemetry
- }),
- ...baseTelemetryAttributes,
- "ai.prompt.format": { input: () => promptFormat },
- "ai.prompt.messages": {
- input: () => JSON.stringify(promptMessages)
- },
- "ai.prompt.tools": {
- // convert the language model level tools:
- input: () => {
- var _a19;
- return (_a19 = toolsAndToolChoice.tools) == null ? void 0 : _a19.map((tool2) => JSON.stringify(tool2));
- }
- },
- "ai.prompt.toolChoice": {
- input: () => toolsAndToolChoice.toolChoice != null ? JSON.stringify(toolsAndToolChoice.toolChoice) : void 0
- },
- // standardized gen-ai llm span attributes:
- "gen_ai.system": model.provider,
- "gen_ai.request.model": model.modelId,
- "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
- "gen_ai.request.max_tokens": settings.maxOutputTokens,
- "gen_ai.request.presence_penalty": settings.presencePenalty,
- "gen_ai.request.stop_sequences": settings.stopSequences,
- "gen_ai.request.temperature": settings.temperature,
- "gen_ai.request.top_k": settings.topK,
- "gen_ai.request.top_p": settings.topP
+ () => {
+ var _a18;
+ return recordSpan({
+ name: "ai.generateText.doGenerate",
+ attributes: selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ ...assembleOperationName({
+ operationId: "ai.generateText.doGenerate",
+ telemetry
+ }),
+ ...baseTelemetryAttributes,
+ "ai.prompt.format": { input: () => promptFormat },
+ "ai.prompt.messages": {
+ input: () => JSON.stringify(promptMessages)
+ },
+ "ai.prompt.tools": {
+ // convert the language model level tools:
+ input: () => {
+ var _a19;
+ return (_a19 = toolsAndToolChoice.tools) == null ? void 0 : _a19.map((tool2) => JSON.stringify(tool2));
+ }
+ },
+ "ai.prompt.toolChoice": {
+ input: () => toolsAndToolChoice.toolChoice != null ? JSON.stringify(toolsAndToolChoice.toolChoice) : void 0
+ },
+ // standardized gen-ai llm span attributes:
+ "gen_ai.system": model.provider,
+ "gen_ai.request.model": model.modelId,
+ "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
+ "gen_ai.request.max_tokens": settings.maxOutputTokens,
+ "gen_ai.request.presence_penalty": settings.presencePenalty,
+ "gen_ai.request.stop_sequences": settings.stopSequences,
+ "gen_ai.request.temperature": (_a18 = settings.temperature) != null ? _a18 : void 0,
+ "gen_ai.request.top_k": settings.topK,
+ "gen_ai.request.top_p": settings.topP
+ }
+ }),
+ tracer,
+ fn: async (span2) => {
+ var _a19, _b2, _c2, _d, _e, _f, _g, _h;
+ const result = await model.doGenerate({
+ ...callSettings,
+ ...toolsAndToolChoice,
+ inputFormat: promptFormat,
+ responseFormat: output == null ? void 0 : output.responseFormat,
+ prompt: promptMessages,
+ providerOptions,
+ abortSignal,
+ headers
+ });
+ const responseData = {
+ id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
+ timestamp: (_d = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d : currentDate(),
+ modelId: (_f = (_e = result.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+ headers: (_g = result.response) == null ? void 0 : _g.headers,
+ body: (_h = result.response) == null ? void 0 : _h.body
+ };
+ span2.setAttributes(
+ selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ "ai.response.finishReason": result.finishReason,
+ "ai.response.text": {
+ output: () => extractContentText(result.content)
+ },
+ "ai.response.toolCalls": {
+ output: () => {
+ const toolCalls = asToolCalls(result.content);
+ return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
+ }
+ },
+ "ai.response.id": responseData.id,
+ "ai.response.model": responseData.modelId,
+ "ai.response.timestamp": responseData.timestamp.toISOString(),
+ // TODO rename telemetry attributes to inputTokens and outputTokens
+ "ai.usage.promptTokens": result.usage.inputTokens,
+ "ai.usage.completionTokens": result.usage.outputTokens,
+ // standardized gen-ai llm span attributes:
+ "gen_ai.response.finish_reasons": [result.finishReason],
+ "gen_ai.response.id": responseData.id,
+ "gen_ai.response.model": responseData.modelId,
+ "gen_ai.usage.input_tokens": result.usage.inputTokens,
+ "gen_ai.usage.output_tokens": result.usage.outputTokens
+ }
+ })
+ );
+ return { ...result, response: responseData };
  }
- }),
- tracer,
- fn: async (span2) => {
- var _a19, _b2, _c2, _d2, _e, _f, _g, _h;
- const result = await model.doGenerate({
- ...callSettings,
- ...toolsAndToolChoice,
- inputFormat: promptFormat,
- responseFormat: output == null ? void 0 : output.responseFormat({ model }),
- prompt: promptMessages,
- providerOptions,
- abortSignal,
- headers
- });
- const responseData = {
- id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
- timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
- modelId: (_f = (_e = result.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
- headers: (_g = result.response) == null ? void 0 : _g.headers,
- body: (_h = result.response) == null ? void 0 : _h.body
- };
- span2.setAttributes(
- selectTelemetryAttributes({
- telemetry,
- attributes: {
- "ai.response.finishReason": result.finishReason,
- "ai.response.text": {
- output: () => extractContentText(result.content)
- },
- "ai.response.toolCalls": {
- output: () => {
- const toolCalls = asToolCalls(result.content);
- return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
- }
- },
- "ai.response.id": responseData.id,
- "ai.response.model": responseData.modelId,
- "ai.response.timestamp": responseData.timestamp.toISOString(),
- // TODO rename telemetry attributes to inputTokens and outputTokens
- "ai.usage.promptTokens": result.usage.inputTokens,
- "ai.usage.completionTokens": result.usage.outputTokens,
- // standardized gen-ai llm span attributes:
- "gen_ai.response.finish_reasons": [result.finishReason],
- "gen_ai.response.id": responseData.id,
- "gen_ai.response.model": responseData.modelId,
- "gen_ai.usage.input_tokens": result.usage.inputTokens,
- "gen_ai.usage.output_tokens": result.usage.outputTokens
- }
- })
- );
- return { ...result, response: responseData };
- }
- })
+ });
+ }
  );
  currentToolCalls = await Promise.all(
  currentModelResponse.content.filter(
@@ -5832,7 +5581,7 @@ async function generateText({
  nextStepType = "tool-result";
  }
  }
- const originalText = (_b = extractContentText(currentModelResponse.content)) != null ? _b : "";
+ const originalText = (_a17 = extractContentText(currentModelResponse.content)) != null ? _a17 : "";
  const stepTextLeadingWhitespaceTrimmed = stepType === "continue" && // only for continue steps
  text2.trimEnd() !== text2 ? originalText.trimStart() : originalText;
  const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace(stepTextLeadingWhitespaceTrimmed) : stepTextLeadingWhitespaceTrimmed;
@@ -5884,7 +5633,7 @@ async function generateText({
  usage: currentUsage,
  warnings: currentModelResponse.warnings,
  logprobs: currentModelResponse.logprobs,
- request: (_c = currentModelResponse.request) != null ? _c : {},
+ request: (_b = currentModelResponse.request) != null ? _b : {},
  response: {
  ...currentModelResponse.response,
  // deep clone msgs to avoid mutating past messages in multi-step:
@@ -5941,7 +5690,7 @@ async function generateText({
  finishReason: currentModelResponse.finishReason,
  usage,
  warnings: currentModelResponse.warnings,
- request: (_d = currentModelResponse.request) != null ? _d : {},
+ request: (_c = currentModelResponse.request) != null ? _c : {},
  response: {
  ...currentModelResponse.response,
  messages: responseMessages
@@ -6165,10 +5914,7 @@ _a15 = symbol15;
  // core/generate-text/output.ts
  var text = () => ({
  type: "text",
- responseFormat: () => ({ type: "text" }),
- injectIntoSystemPrompt({ system }) {
- return system;
- },
+ responseFormat: { type: "text" },
  parsePartial({ text: text2 }) {
  return { partial: text2 };
  },
@@ -6182,15 +5928,9 @@ var object = ({
  const schema = asSchema(inputSchema);
  return {
  type: "object",
- responseFormat: ({ model }) => ({
+ responseFormat: {
  type: "json",
- schema: model.supportsStructuredOutputs ? schema.jsonSchema : void 0
- }),
- injectIntoSystemPrompt({ system, model }) {
- return model.supportsStructuredOutputs ? system : injectJsonInstruction({
- prompt: system,
- schema: schema.jsonSchema
- });
+ schema: schema.jsonSchema
  },
  parsePartial({ text: text2 }) {
  const result = parsePartialJson(text2);
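
Note: Output.text() and Output.object() now expose responseFormat as a plain object rather than a ({ model }) factory, since the schema no longer depends on model.supportsStructuredOutputs, and injectIntoSystemPrompt disappears for the same reason:

    // before: output.responseFormat({ model }) -> { type: "json", schema?: ... }
    // after:  output.responseFormat            -> { type: "json", schema }
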
@@ -6793,7 +6533,6 @@ var DefaultStreamTextResult = class {
  this.requestPromise = new DelayedPromise();
  this.responsePromise = new DelayedPromise();
  this.stepsPromise = new DelayedPromise();
- var _a17;
  if (maxSteps < 1) {
  throw new InvalidArgumentError({
  parameter: "maxSteps",
@@ -6945,7 +6684,7 @@ var DefaultStreamTextResult = class {
  }
  },
  async flush(controller) {
- var _a18;
+ var _a17;
  try {
  if (recordedSteps.length === 0) {
  return;
@@ -6982,7 +6721,7 @@ var DefaultStreamTextResult = class {
  sources: lastStep.sources,
  toolCalls: lastStep.toolCalls,
  toolResults: lastStep.toolResults,
- request: (_a18 = lastStep.request) != null ? _a18 : {},
+ request: (_a17 = lastStep.request) != null ? _a17 : {},
  response: lastStep.response,
  warnings: lastStep.warnings,
  providerMetadata: lastStep.providerMetadata,
@@ -6996,8 +6735,8 @@ var DefaultStreamTextResult = class {
  "ai.response.text": { output: () => recordedFullText },
  "ai.response.toolCalls": {
  output: () => {
- var _a19;
- return ((_a19 = lastStep.toolCalls) == null ? void 0 : _a19.length) ? JSON.stringify(lastStep.toolCalls) : void 0;
+ var _a18;
+ return ((_a18 = lastStep.toolCalls) == null ? void 0 : _a18.length) ? JSON.stringify(lastStep.toolCalls) : void 0;
  }
  },
  "ai.usage.promptTokens": usage.promptTokens,
@@ -7031,18 +6770,15 @@ var DefaultStreamTextResult = class {
  maxRetries: maxRetriesArg
  });
  const tracer = getTracer(telemetry);
+ const callSettings = prepareCallSettings(settings);
  const baseTelemetryAttributes = getBaseTelemetryAttributes({
  model,
  telemetry,
  headers,
- settings: { ...settings, maxRetries }
+ settings: { ...callSettings, maxRetries }
  });
  const initialPrompt = standardizePrompt({
- prompt: {
- system: (_a17 = output == null ? void 0 : output.injectIntoSystemPrompt({ system, model })) != null ? _a17 : system,
- prompt,
- messages
- },
+ prompt: { system, prompt, messages },
  tools
  });
  const self = this;
@@ -7073,7 +6809,6 @@ var DefaultStreamTextResult = class {
  hasLeadingWhitespace,
  messageId
  }) {
- var _a18;
  const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
  const stepInputMessages = [
  ...initialPrompt.messages,
@@ -7085,9 +6820,7 @@ var DefaultStreamTextResult = class {
  system: initialPrompt.system,
  messages: stepInputMessages
  },
- modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: (_a18 = model.supportsUrl) == null ? void 0 : _a18.bind(model)
- // support 'this' context
+ supportedUrls: await model.getSupportedUrls()
  });
  const toolsAndToolChoice = {
  ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
@@ -7116,8 +6849,8 @@ var DefaultStreamTextResult = class {
  "ai.prompt.tools": {
  // convert the language model level tools:
  input: () => {
- var _a19;
- return (_a19 = toolsAndToolChoice.tools) == null ? void 0 : _a19.map(
+ var _a17;
+ return (_a17 = toolsAndToolChoice.tools) == null ? void 0 : _a17.map(
  (tool2) => JSON.stringify(tool2)
  );
  }
@@ -7128,32 +6861,34 @@ var DefaultStreamTextResult = class {
  // standardized gen-ai llm span attributes:
  "gen_ai.system": model.provider,
  "gen_ai.request.model": model.modelId,
- "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
- "gen_ai.request.max_tokens": settings.maxOutputTokens,
- "gen_ai.request.presence_penalty": settings.presencePenalty,
- "gen_ai.request.stop_sequences": settings.stopSequences,
- "gen_ai.request.temperature": settings.temperature,
- "gen_ai.request.top_k": settings.topK,
- "gen_ai.request.top_p": settings.topP
+ "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
+ "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
+ "gen_ai.request.presence_penalty": callSettings.presencePenalty,
+ "gen_ai.request.stop_sequences": callSettings.stopSequences,
+ "gen_ai.request.temperature": callSettings.temperature,
+ "gen_ai.request.top_k": callSettings.topK,
+ "gen_ai.request.top_p": callSettings.topP
  }
  }),
  tracer,
  endWhenDone: false,
- fn: async (doStreamSpan2) => ({
- startTimestampMs: now2(),
- // get before the call
- doStreamSpan: doStreamSpan2,
- result: await model.doStream({
- ...prepareCallSettings(settings),
- ...toolsAndToolChoice,
- inputFormat: promptFormat,
- responseFormat: output == null ? void 0 : output.responseFormat({ model }),
- prompt: promptMessages,
- providerOptions,
- abortSignal,
- headers
- })
- })
+ fn: async (doStreamSpan2) => {
+ return {
+ startTimestampMs: now2(),
+ // get before the call
+ doStreamSpan: doStreamSpan2,
+ result: await model.doStream({
+ ...callSettings,
+ ...toolsAndToolChoice,
+ inputFormat: promptFormat,
+ responseFormat: output == null ? void 0 : output.responseFormat,
+ prompt: promptMessages,
+ providerOptions,
+ abortSignal,
+ headers
+ })
+ };
+ }
  })
  );
  const transformedStream = runToolsTransformation({
@@ -7208,7 +6943,7 @@ var DefaultStreamTextResult = class {
  transformedStream.pipeThrough(
  new TransformStream({
  async transform(chunk, controller) {
- var _a19, _b, _c;
+ var _a17, _b, _c;
  if (chunk.type === "stream-start") {
  warnings = chunk.warnings;
  return;
@@ -7299,7 +7034,7 @@ var DefaultStreamTextResult = class {
  }
  case "response-metadata": {
  stepResponse = {
- id: (_a19 = chunk.id) != null ? _a19 : stepResponse.id,
+ id: (_a17 = chunk.id) != null ? _a17 : stepResponse.id,
  timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
  modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
  };
@@ -8035,17 +7770,32 @@ function defaultSettingsMiddleware({
  return {
  middlewareVersion: "v2",
  transformParams: async ({ params }) => {
- var _a17;
+ var _a17, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
  return {
  ...settings,
  ...params,
+ // map all values that are null to undefined
+ maxOutputTokens: settings.maxOutputTokens !== null ? (_a17 = params.maxOutputTokens) != null ? _a17 : settings.maxOutputTokens : void 0,
+ temperature: settings.temperature !== null ? (
+ // temperature: special case 0 or null
+ params.temperature === 0 || params.temperature == null ? (_b = settings.temperature) != null ? _b : params.temperature : params.temperature
+ ) : void 0,
+ stopSequences: settings.stopSequences !== null ? (_c = params.stopSequences) != null ? _c : settings.stopSequences : void 0,
+ topP: settings.topP !== null ? (_d = params.topP) != null ? _d : settings.topP : void 0,
+ topK: settings.topK !== null ? (_e = params.topK) != null ? _e : settings.topK : void 0,
+ presencePenalty: settings.presencePenalty !== null ? (_f = params.presencePenalty) != null ? _f : settings.presencePenalty : void 0,
+ frequencyPenalty: settings.frequencyPenalty !== null ? (_g = params.frequencyPenalty) != null ? _g : settings.frequencyPenalty : void 0,
+ responseFormat: settings.responseFormat !== null ? (_h = params.responseFormat) != null ? _h : settings.responseFormat : void 0,
+ seed: settings.seed !== null ? (_i = params.seed) != null ? _i : settings.seed : void 0,
+ tools: settings.tools !== null ? (_j = params.tools) != null ? _j : settings.tools : void 0,
+ toolChoice: settings.toolChoice !== null ? (_k = params.toolChoice) != null ? _k : settings.toolChoice : void 0,
+ // headers: deep merge
+ headers: mergeObjects(settings.headers, params.headers),
+ // provider options: deep merge
  providerOptions: mergeObjects(
  settings.providerOptions,
  params.providerOptions
- ),
- // special case for temperature 0
- // TODO remove when temperature defaults to undefined
- temperature: params.temperature === 0 || params.temperature == null ? (_a17 = settings.temperature) != null ? _a17 : 0 : params.temperature
+ )
  };
  }
  };
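
Note: defaultSettingsMiddleware now treats null in the middleware settings as an explicit per-key opt-out: the request params win when present, the middleware default fills the gap, and null forces undefined. Temperature keeps its special case where 0 or a missing param falls back to the default, and headers join providerOptions in being deep-merged. A hedged usage sketch, assuming the SDK's wrapLanguageModel helper:

    const model = wrapLanguageModel({
      model: baseModel,
      middleware: defaultSettingsMiddleware({
        // topP: null forces topP to undefined even if a caller sets it
        settings: { temperature: 0.3, topP: null },
      }),
    });
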
@@ -8233,7 +7983,6 @@ var doWrap = ({
  modelId,
  providerId
  }) => {
- var _a17;
  async function doTransform({
  params,
  type
@@ -8244,10 +7993,10 @@ var doWrap = ({
  specificationVersion: "v2",
  provider: providerId != null ? providerId : model.provider,
  modelId: modelId != null ? modelId : model.modelId,
- defaultObjectGenerationMode: model.defaultObjectGenerationMode,
- supportsImageUrls: model.supportsImageUrls,
- supportsUrl: (_a17 = model.supportsUrl) == null ? void 0 : _a17.bind(model),
- supportsStructuredOutputs: model.supportsStructuredOutputs,
+ // TODO middleware should be able to modify the supported urls
+ async getSupportedUrls() {
+ return model.getSupportedUrls();
+ },
  async doGenerate(params) {
  const transformedParams = await doTransform({ params, type: "generate" });
  const doGenerate = async () => model.doGenerate(transformedParams);
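
Note: wrapped models no longer forward defaultObjectGenerationMode, supportsImageUrls, supportsUrl, or supportsStructuredOutputs; of the old capability surface only URL support survives, as the async getSupportedUrls() delegate (with a TODO to let middleware modify it). A sketch under the same assumptions as above:

    const wrapped = wrapLanguageModel({ model, middleware });
    await wrapped.getSupportedUrls(); // delegates to model.getSupportedUrls()
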