@mastra/server 0.14.0 → 0.14.1-alpha.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
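The most user-visible additions in this diff are server-side tools in the vendored providers: Anthropic's code execution tool (code_execution_20250522), Groq's browser_search, and OpenAI's code_interpreter. As a minimal usage sketch for the Anthropic one, assuming the upstream ai and @ai-sdk/anthropic packages that this bundle vendors (the model id and prompt are illustrative, not taken from this diff):

import { generateText } from 'ai';
import { anthropic } from '@ai-sdk/anthropic';

// The tool must be registered under the name "code_execution";
// the vendored factory below fixes name: "code_execution".
const result = await generateText({
  model: anthropic('claude-sonnet-4-20250514'), // assumed model id
  prompt: 'Use Python to compute the 20th Fibonacci number.',
  tools: {
    // Executed on Anthropic's servers; results come back as
    // code_execution_tool_result blocks carrying stdout, stderr,
    // and return_code, matching the schemas added in this diff.
    code_execution: anthropic.tools.codeExecution_20250522(),
  },
});

console.log(result.text);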
@@ -4,7 +4,7 @@ import { handleError } from './chunk-CY4TP3FK.js';
  import { HTTPException } from './chunk-MMROOK5J.js';
  import { __commonJS, __export, __toESM } from './chunk-G3PMV62Z.js';
  import { z } from 'zod';
- import z52, { z as z$1 } from 'zod/v4';
+ import z62, { z as z$1 } from 'zod/v4';
  import { RuntimeContext } from '@mastra/core/runtime-context';

  // ../../node_modules/.pnpm/secure-json-parse@2.7.0/node_modules/secure-json-parse/index.js
@@ -3333,6 +3333,14 @@ function prepareTools2({
  });
  break;
  }
+ case "anthropic.code_execution_20250522": {
+ betas.add("code-execution-2025-05-22");
+ anthropicTools22.push({
+ type: "code_execution_20250522",
+ name: "code_execution"
+ });
+ break;
+ }
  default:
  toolWarnings.push({ type: "unsupported-tool", tool: tool2 });
  break;
@@ -3394,6 +3402,23 @@ function prepareTools2({
  }
  }
  }
+ var codeExecution_20250522OutputSchema = z$1.object({
+ type: z$1.literal("code_execution_result"),
+ stdout: z$1.string(),
+ stderr: z$1.string(),
+ return_code: z$1.number()
+ });
+ var factory2 = createProviderDefinedToolFactoryWithOutputSchema({
+ id: "anthropic.code_execution_20250522",
+ name: "code_execution",
+ inputSchema: z$1.object({
+ code: z$1.string()
+ }),
+ outputSchema: codeExecution_20250522OutputSchema
+ });
+ var codeExecution_20250522 = (args = {}) => {
+ return factory2(args);
+ };
  function convertToString(data) {
  if (typeof data === "string") {
  return Buffer.from(data, "base64").toString("utf-8");
@@ -3693,6 +3718,16 @@ async function convertToAnthropicMessagesPrompt2({
  });
  break;
  }
+ if (part.toolName === "code_execution") {
+ anthropicContent.push({
+ type: "server_tool_use",
+ id: part.toolCallId,
+ name: "code_execution",
+ input: part.input,
+ cache_control: cacheControl
+ });
+ break;
+ }
  warnings.push({
  type: "other",
  message: `provider executed tool call for tool ${part.toolName} is not supported`
@@ -3735,6 +3770,29 @@ async function convertToAnthropicMessagesPrompt2({
  });
  break;
  }
+ if (part.toolName === "code_execution") {
+ const output = part.output;
+ if (output.type !== "json") {
+ warnings.push({
+ type: "other",
+ message: `provider executed tool result output type ${output.type} for tool ${part.toolName} is not supported`
+ });
+ break;
+ }
+ const codeExecutionOutput = codeExecution_20250522OutputSchema.parse(output.value);
+ anthropicContent.push({
+ type: "code_execution_tool_result",
+ tool_use_id: part.toolCallId,
+ content: {
+ type: codeExecutionOutput.type,
+ stdout: codeExecutionOutput.stdout,
+ stderr: codeExecutionOutput.stderr,
+ return_code: codeExecutionOutput.return_code
+ },
+ cache_control: cacheControl
+ });
+ break;
+ }
  warnings.push({
  type: "other",
  message: `provider executed tool result for tool ${part.toolName} is not supported`
@@ -4175,7 +4233,7 @@ var AnthropicMessagesLanguageModel2 = class {
  break;
  }
  case "server_tool_use": {
- if (part.name === "web_search") {
+ if (part.name === "web_search" || part.name === "code_execution") {
  content.push({
  type: "tool-call",
  toolCallId: part.id,
@@ -4233,6 +4291,35 @@ var AnthropicMessagesLanguageModel2 = class {
  }
  break;
  }
+ case "code_execution_tool_result": {
+ if (part.content.type === "code_execution_result") {
+ content.push({
+ type: "tool-result",
+ toolCallId: part.tool_use_id,
+ toolName: "code_execution",
+ result: {
+ type: part.content.type,
+ stdout: part.content.stdout,
+ stderr: part.content.stderr,
+ return_code: part.content.return_code
+ },
+ providerExecuted: true
+ });
+ } else if (part.content.type === "code_execution_tool_result_error") {
+ content.push({
+ type: "tool-result",
+ toolCallId: part.tool_use_id,
+ toolName: "code_execution",
+ isError: true,
+ result: {
+ type: "code_execution_tool_result_error",
+ errorCode: part.content.error_code
+ },
+ providerExecuted: true
+ });
+ }
+ break;
+ }
  }
  }
  return {
@@ -4358,7 +4445,7 @@ var AnthropicMessagesLanguageModel2 = class {
  return;
  }
  case "server_tool_use": {
- if (value.content_block.name === "web_search") {
+ if (value.content_block.name === "web_search" || value.content_block.name === "code_execution") {
  contentBlocks[value.index] = {
  type: "tool-call",
  toolCallId: value.content_block.id,
@@ -4423,6 +4510,36 @@ var AnthropicMessagesLanguageModel2 = class {
  }
  return;
  }
+ case "code_execution_tool_result": {
+ const part = value.content_block;
+ if (part.content.type === "code_execution_result") {
+ controller.enqueue({
+ type: "tool-result",
+ toolCallId: part.tool_use_id,
+ toolName: "code_execution",
+ result: {
+ type: part.content.type,
+ stdout: part.content.stdout,
+ stderr: part.content.stderr,
+ return_code: part.content.return_code
+ },
+ providerExecuted: true
+ });
+ } else if (part.content.type === "code_execution_tool_result_error") {
+ controller.enqueue({
+ type: "tool-result",
+ toolCallId: part.tool_use_id,
+ toolName: "code_execution",
+ isError: true,
+ result: {
+ type: "code_execution_tool_result_error",
+ errorCode: part.content.error_code
+ },
+ providerExecuted: true
+ });
+ }
+ return;
+ }
  default: {
  const _exhaustiveCheck = contentBlockType;
  throw new Error(
@@ -4645,6 +4762,22 @@ var anthropicMessagesResponseSchema2 = z$1.object({
  error_code: z$1.string()
  })
  ])
+ }),
+ z$1.object({
+ type: z$1.literal("code_execution_tool_result"),
+ tool_use_id: z$1.string(),
+ content: z$1.union([
+ z$1.object({
+ type: z$1.literal("code_execution_result"),
+ stdout: z$1.string(),
+ stderr: z$1.string(),
+ return_code: z$1.number()
+ }),
+ z$1.object({
+ type: z$1.literal("code_execution_tool_result_error"),
+ error_code: z$1.string()
+ })
+ ])
  })
  ])
  ),
@@ -4715,6 +4848,22 @@ var anthropicMessagesChunkSchema2 = z$1.discriminatedUnion("type", [
  error_code: z$1.string()
  })
  ])
+ }),
+ z$1.object({
+ type: z$1.literal("code_execution_tool_result"),
+ tool_use_id: z$1.string(),
+ content: z$1.union([
+ z$1.object({
+ type: z$1.literal("code_execution_result"),
+ stdout: z$1.string(),
+ stderr: z$1.string(),
+ return_code: z$1.number()
+ }),
+ z$1.object({
+ type: z$1.literal("code_execution_tool_result_error"),
+ error_code: z$1.string()
+ })
+ ])
  })
  ])
  }),
@@ -4774,17 +4923,17 @@ var anthropicReasoningMetadataSchema = z$1.object({
  var bash_20241022 = createProviderDefinedToolFactory({
  id: "anthropic.bash_20241022",
  name: "bash",
- inputSchema: z52.object({
- command: z52.string(),
- restart: z52.boolean().optional()
+ inputSchema: z62.object({
+ command: z62.string(),
+ restart: z62.boolean().optional()
  })
  });
  var bash_20250124 = createProviderDefinedToolFactory({
  id: "anthropic.bash_20250124",
  name: "bash",
- inputSchema: z52.object({
- command: z52.string(),
- restart: z52.boolean().optional()
+ inputSchema: z62.object({
+ command: z62.string(),
+ restart: z62.boolean().optional()
  })
  });
  var computer_20241022 = createProviderDefinedToolFactory({
@@ -4936,7 +5085,11 @@ var anthropicTools2 = {
  * @param blockedDomains - Optional list of domains that Claude should avoid when searching.
  * @param userLocation - Optional user location information to provide geographically relevant search results.
  */
- webSearch_20250305
+ webSearch_20250305,
+ /**
+ * Creates a tool for executing Python code. Must have name "code_execution".
+ */
+ codeExecution_20250522
  };
  function createAnthropic2(options = {}) {
  var _a16;
@@ -5431,6 +5584,7 @@ var GoogleGenerativeAILanguageModel = class {
  args: {
  generationConfig,
  contents,
+ systemInstruction,
  tools: {
  functionDeclarations: [
  {
@@ -6199,9 +6353,20 @@ function convertToGoogleGenerativeAIMessages2(prompt, options) {
  contents.push({
  role: "model",
  parts: content.map((part) => {
+ var _a23, _b, _c, _d, _e, _f;
  switch (part.type) {
  case "text": {
- return part.text.length === 0 ? void 0 : { text: part.text };
+ return part.text.length === 0 ? void 0 : {
+ text: part.text,
+ thoughtSignature: (_b = (_a23 = part.providerOptions) == null ? void 0 : _a23.google) == null ? void 0 : _b.thoughtSignature
+ };
+ }
+ case "reasoning": {
+ return part.text.length === 0 ? void 0 : {
+ text: part.text,
+ thought: true,
+ thoughtSignature: (_d = (_c = part.providerOptions) == null ? void 0 : _c.google) == null ? void 0 : _d.thoughtSignature
+ };
  }
  case "file": {
  if (part.mediaType !== "image/png") {
@@ -6226,7 +6391,8 @@ function convertToGoogleGenerativeAIMessages2(prompt, options) {
  functionCall: {
  name: part.toolName,
  args: part.input
- }
+ },
+ thoughtSignature: (_f = (_e = part.providerOptions) == null ? void 0 : _e.google) == null ? void 0 : _f.thoughtSignature
  };
  }
  }
@@ -6321,7 +6487,13 @@ var googleGenerativeAIProviderOptions = z$1.object({
  *
  * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/audio-understanding
  */
- audioTimestamp: z$1.boolean().optional()
+ audioTimestamp: z$1.boolean().optional(),
+ /**
+ * Optional. Defines labels used in billing reports. Available on Vertex AI only.
+ *
+ * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/add-labels-to-api-calls
+ */
+ labels: z$1.record(z$1.string(), z$1.string()).optional()
  });
  function prepareTools4({
  tools,
@@ -6624,7 +6796,8 @@ var GoogleGenerativeAILanguageModel2 = class {
  safetySettings: googleOptions == null ? void 0 : googleOptions.safetySettings,
  tools: googleTools2,
  toolConfig: googleToolConfig,
- cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent
+ cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent,
+ labels: googleOptions == null ? void 0 : googleOptions.labels
  },
  warnings: [...warnings, ...toolWarnings]
  };
@@ -6682,17 +6855,18 @@ var GoogleGenerativeAILanguageModel2 = class {
  });
  lastCodeExecutionToolCallId = void 0;
  } else if ("text" in part && part.text != null && part.text.length > 0) {
- if (part.thought === true) {
- content.push({ type: "reasoning", text: part.text });
- } else {
- content.push({ type: "text", text: part.text });
- }
+ content.push({
+ type: part.thought === true ? "reasoning" : "text",
+ text: part.text,
+ providerMetadata: part.thoughtSignature ? { google: { thoughtSignature: part.thoughtSignature } } : void 0
+ });
  } else if ("functionCall" in part) {
  content.push({
  type: "tool-call",
  toolCallId: this.config.generateId(),
  toolName: part.functionCall.name,
- input: JSON.stringify(part.functionCall.args)
+ input: JSON.stringify(part.functionCall.args),
+ providerMetadata: part.thoughtSignature ? { google: { thoughtSignature: part.thoughtSignature } } : void 0
  });
  } else if ("inlineData" in part) {
  content.push({
@@ -6854,13 +7028,21 @@ var GoogleGenerativeAILanguageModel2 = class {
  currentReasoningBlockId = String(blockCounter++);
  controller.enqueue({
  type: "reasoning-start",
- id: currentReasoningBlockId
+ id: currentReasoningBlockId,
+ providerMetadata: part.thoughtSignature ? {
+ google: {
+ thoughtSignature: part.thoughtSignature
+ }
+ } : void 0
  });
  }
  controller.enqueue({
  type: "reasoning-delta",
  id: currentReasoningBlockId,
- delta: part.text
+ delta: part.text,
+ providerMetadata: part.thoughtSignature ? {
+ google: { thoughtSignature: part.thoughtSignature }
+ } : void 0
  });
  } else {
  if (currentReasoningBlockId !== null) {
@@ -6874,13 +7056,21 @@ var GoogleGenerativeAILanguageModel2 = class {
  currentTextBlockId = String(blockCounter++);
  controller.enqueue({
  type: "text-start",
- id: currentTextBlockId
+ id: currentTextBlockId,
+ providerMetadata: part.thoughtSignature ? {
+ google: {
+ thoughtSignature: part.thoughtSignature
+ }
+ } : void 0
  });
  }
  controller.enqueue({
  type: "text-delta",
  id: currentTextBlockId,
- delta: part.text
+ delta: part.text,
+ providerMetadata: part.thoughtSignature ? {
+ google: { thoughtSignature: part.thoughtSignature }
+ } : void 0
  });
  }
  }
@@ -6904,22 +7094,26 @@ var GoogleGenerativeAILanguageModel2 = class {
  controller.enqueue({
  type: "tool-input-start",
  id: toolCall.toolCallId,
- toolName: toolCall.toolName
+ toolName: toolCall.toolName,
+ providerMetadata: toolCall.providerMetadata
  });
  controller.enqueue({
  type: "tool-input-delta",
  id: toolCall.toolCallId,
- delta: toolCall.args
+ delta: toolCall.args,
+ providerMetadata: toolCall.providerMetadata
  });
  controller.enqueue({
  type: "tool-input-end",
- id: toolCall.toolCallId
+ id: toolCall.toolCallId,
+ providerMetadata: toolCall.providerMetadata
  });
  controller.enqueue({
  type: "tool-call",
  toolCallId: toolCall.toolCallId,
  toolName: toolCall.toolName,
- input: toolCall.args
+ input: toolCall.args,
+ providerMetadata: toolCall.providerMetadata
  });
  hasToolCalls = true;
  }
@@ -6980,7 +7174,8 @@ function getToolCallsFromParts2({
  type: "tool-call",
  toolCallId: generateId3(),
  toolName: part.functionCall.name,
- args: JSON.stringify(part.functionCall.args)
+ args: JSON.stringify(part.functionCall.args),
+ providerMetadata: part.thoughtSignature ? { google: { thoughtSignature: part.thoughtSignature } } : void 0
  }));
  }
  function getInlineDataParts2(parts) {
@@ -7011,7 +7206,8 @@ var contentSchema2 = z$1.object({
  functionCall: z$1.object({
  name: z$1.string(),
  args: z$1.unknown()
- })
+ }),
+ thoughtSignature: z$1.string().nullish()
  }),
  z$1.object({
  inlineData: z$1.object({
@@ -7029,7 +7225,8 @@ var contentSchema2 = z$1.object({
  output: z$1.string()
  }).nullish(),
  text: z$1.string().nullish(),
- thought: z$1.boolean().nullish()
+ thought: z$1.boolean().nullish(),
+ thoughtSignature: z$1.string().nullish()
  })
  ])
  ).nullish()
@@ -7226,9 +7423,14 @@ function createGoogleGenerativeAI2(options = {}) {
  generateId: (_a23 = options.generateId) != null ? _a23 : generateId2,
  supportedUrls: () => ({
  "*": [
- // Only allow requests to the Google Generative Language "files" endpoint
+ // Google Generative Language "files" endpoint
  // e.g. https://generativelanguage.googleapis.com/v1beta/files/...
- new RegExp(`^${baseURL}/files/.*$`)
+ new RegExp(`^${baseURL}/files/.*$`),
+ // YouTube URLs (public or unlisted videos)
+ new RegExp(
+ `^https://(?:www\\.)?youtube\\.com/watch\\?v=[\\w-]+(?:&[\\w=&.-]*)?$`
+ ),
+ new RegExp(`^https://youtu\\.be/[\\w-]+(?:\\?[\\w=&.-]*)?$`)
  ]
  }),
  fetch: options.fetch
@@ -7383,12 +7585,12 @@ function prepareTools5({
  return { tools: void 0, tool_choice: void 0, toolWarnings };
  }
  const toolChoice = mode.toolChoice;
- const groqTools = [];
+ const groqTools2 = [];
  for (const tool2 of tools) {
  if (tool2.type === "provider-defined") {
  toolWarnings.push({ type: "unsupported-tool", tool: tool2 });
  } else {
- groqTools.push({
+ groqTools2.push({
  type: "function",
  function: {
  name: tool2.name,
@@ -7399,17 +7601,17 @@ function prepareTools5({
  }
  }
  if (toolChoice == null) {
- return { tools: groqTools, tool_choice: void 0, toolWarnings };
+ return { tools: groqTools2, tool_choice: void 0, toolWarnings };
  }
  const type = toolChoice.type;
  switch (type) {
  case "auto":
  case "none":
  case "required":
- return { tools: groqTools, tool_choice: type, toolWarnings };
+ return { tools: groqTools2, tool_choice: type, toolWarnings };
  case "tool":
  return {
- tools: groqTools,
+ tools: groqTools2,
  tool_choice: {
  type: "function",
  function: {
@@ -8050,7 +8252,7 @@ function convertToGroqChatMessages2(prompt) {
  return {
  type: "image_url",
  image_url: {
- url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${convertToBase64(part.data)}`
  }
  };
  }
@@ -8061,9 +8263,16 @@ function convertToGroqChatMessages2(prompt) {
  }
  case "assistant": {
  let text = "";
+ let reasoning = "";
  const toolCalls = [];
  for (const part of content) {
  switch (part.type) {
+ // groq supports reasoning for tool-calls in multi-turn conversations
+ // https://github.com/vercel/ai/issues/7860
+ case "reasoning": {
+ reasoning += part.text;
+ break;
+ }
  case "text": {
  text += part.text;
  break;
@@ -8084,7 +8293,8 @@ function convertToGroqChatMessages2(prompt) {
  messages.push({
  role: "assistant",
  content: text,
- tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+ ...reasoning.length > 0 ? { reasoning } : null,
+ ...toolCalls.length > 0 ? { tool_calls: toolCalls } : null
  });
  break;
  }
@@ -8132,6 +8342,7 @@ function getResponseMetadata2({
  }
  var groqProviderOptions = z$1.object({
  reasoningFormat: z$1.enum(["parsed", "raw", "hidden"]).optional(),
+ reasoningEffort: z$1.string().optional(),
  /**
  * Whether to enable parallel function calling during tool use. Default to true.
  */
@@ -8158,21 +8369,46 @@ var groqFailedResponseHandler2 = createJsonErrorResponseHandler2({
  errorSchema: groqErrorDataSchema2,
  errorToMessage: (data) => data.error.message
  });
+ var BROWSER_SEARCH_SUPPORTED_MODELS = [
+ "openai/gpt-oss-20b",
+ "openai/gpt-oss-120b"
+ ];
+ function isBrowserSearchSupportedModel(modelId) {
+ return BROWSER_SEARCH_SUPPORTED_MODELS.includes(modelId);
+ }
+ function getSupportedModelsString() {
+ return BROWSER_SEARCH_SUPPORTED_MODELS.join(", ");
+ }
  function prepareTools6({
  tools,
- toolChoice
+ toolChoice,
+ modelId
  }) {
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
  if (tools == null) {
  return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
- const groqTools = [];
+ const groqTools2 = [];
  for (const tool2 of tools) {
  if (tool2.type === "provider-defined") {
- toolWarnings.push({ type: "unsupported-tool", tool: tool2 });
+ if (tool2.id === "groq.browser_search") {
+ if (!isBrowserSearchSupportedModel(modelId)) {
+ toolWarnings.push({
+ type: "unsupported-tool",
+ tool: tool2,
+ details: `Browser search is only supported on the following models: ${getSupportedModelsString()}. Current model: ${modelId}`
+ });
+ } else {
+ groqTools2.push({
+ type: "browser_search"
+ });
+ }
+ } else {
+ toolWarnings.push({ type: "unsupported-tool", tool: tool2 });
+ }
  } else {
- groqTools.push({
+ groqTools2.push({
  type: "function",
  function: {
  name: tool2.name,
@@ -8183,17 +8419,17 @@ function prepareTools6({
  }
  }
  if (toolChoice == null) {
- return { tools: groqTools, toolChoice: void 0, toolWarnings };
+ return { tools: groqTools2, toolChoice: void 0, toolWarnings };
  }
  const type = toolChoice.type;
  switch (type) {
  case "auto":
  case "none":
  case "required":
- return { tools: groqTools, toolChoice: type, toolWarnings };
+ return { tools: groqTools2, toolChoice: type, toolWarnings };
  case "tool":
  return {
- tools: groqTools,
+ tools: groqTools2,
  toolChoice: {
  type: "function",
  function: {
@@ -8275,10 +8511,10 @@ var GroqChatLanguageModel2 = class {
  });
  }
  const {
- tools: groqTools,
+ tools: groqTools2,
  toolChoice: groqToolChoice,
  toolWarnings
- } = prepareTools6({ tools, toolChoice });
+ } = prepareTools6({ tools, toolChoice, modelId: this.modelId });
  return {
  args: {
  // model id:
@@ -8305,10 +8541,11 @@ var GroqChatLanguageModel2 = class {
  } : { type: "json_object" } : void 0,
  // provider options:
  reasoning_format: groqOptions == null ? void 0 : groqOptions.reasoningFormat,
+ reasoning_effort: groqOptions == null ? void 0 : groqOptions.reasoningEffort,
  // messages:
  messages: convertToGroqChatMessages2(prompt),
  // tools:
- tools: groqTools,
+ tools: groqTools2,
  tool_choice: groqToolChoice
  },
  warnings: [...warnings, ...toolWarnings]
@@ -8766,6 +9003,14 @@ var groqTranscriptionResponseSchema2 = z$1.object({
  id: z$1.string()
  })
  });
+ var browserSearch = createProviderDefinedToolFactory({
+ id: "groq.browser_search",
+ name: "browser_search",
+ inputSchema: z$1.object({})
+ });
+ var groqTools = {
+ browserSearch
+ };
  function createGroq2(options = {}) {
  var _a16;
  const baseURL = (_a16 = withoutTrailingSlash2(options.baseURL)) != null ? _a16 : "https://api.groq.com/openai/v1";
@@ -8811,6 +9056,7 @@ function createGroq2(options = {}) {
  throw new NoSuchModelError2({ modelId, modelType: "imageModel" });
  };
  provider.transcription = createTranscriptionModel;
+ provider.tools = groqTools;
  return provider;
  }
  var groq2 = createGroq2();
@@ -11581,6 +11827,21 @@ var openai = createOpenAI({
  compatibility: "strict"
  // strict for OpenAI API
  });
+ var openaiErrorDataSchema2 = z$1.object({
+ error: z$1.object({
+ message: z$1.string(),
+ // The additional information below is handled loosely to support
+ // OpenAI-compatible providers that have slightly different error
+ // responses:
+ type: z$1.string().nullish(),
+ param: z$1.any().nullish(),
+ code: z$1.union([z$1.string(), z$1.number()]).nullish()
+ })
+ });
+ var openaiFailedResponseHandler2 = createJsonErrorResponseHandler2({
+ errorSchema: openaiErrorDataSchema2,
+ errorToMessage: (data) => data.error.message
+ });
  function convertToOpenAIChatMessages2({
  prompt,
  systemMessageMode = "system"
@@ -11679,7 +11940,7 @@ function convertToOpenAIChatMessages2({
  }
  return {
  type: "file",
- file: {
+ file: typeof part.data === "string" && part.data.startsWith("file-") ? { file_id: part.data } : {
  filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
  file_data: `data:application/pdf;base64,${convertToBase64(part.data)}`
  }
@@ -11811,7 +12072,7 @@ var openaiProviderOptions = z$1.object({
  /**
  * Reasoning effort for reasoning models. Defaults to `medium`.
  */
- reasoningEffort: z$1.enum(["low", "medium", "high"]).optional(),
+ reasoningEffort: z$1.enum(["minimal", "low", "medium", "high"]).optional(),
  /**
  * Maximum number of completion tokens to generate. Useful for reasoning models.
  */
@@ -11848,22 +12109,25 @@ var openaiProviderOptions = z$1.object({
  *
  * @default false
  */
- strictJsonSchema: z$1.boolean().optional()
- });
- var openaiErrorDataSchema2 = z$1.object({
- error: z$1.object({
- message: z$1.string(),
- // The additional information below is handled loosely to support
- // OpenAI-compatible providers that have slightly different error
- // responses:
- type: z$1.string().nullish(),
- param: z$1.any().nullish(),
- code: z$1.union([z$1.string(), z$1.number()]).nullish()
- })
- });
- var openaiFailedResponseHandler2 = createJsonErrorResponseHandler2({
- errorSchema: openaiErrorDataSchema2,
- errorToMessage: (data) => data.error.message
+ strictJsonSchema: z$1.boolean().optional(),
+ /**
+ * Controls the verbosity of the model's responses.
+ * Lower values will result in more concise responses, while higher values will result in more verbose responses.
+ */
+ textVerbosity: z$1.enum(["low", "medium", "high"]).optional(),
+ /**
+ * A cache key for prompt caching. Allows manual control over prompt caching behavior.
+ * Useful for improving cache hit rates and working around automatic caching issues.
+ */
+ promptCacheKey: z$1.string().optional(),
+ /**
+ * A stable identifier used to help detect users of your application
+ * that may be violating OpenAI's usage policies. The IDs should be a
+ * string that uniquely identifies each user. We recommend hashing their
+ * username or email address, in order to avoid sending us any identifying
+ * information.
+ */
+ safetyIdentifier: z$1.string().optional()
  });
  var comparisonFilterSchema = z$1.object({
  key: z$1.string(),
@@ -11943,7 +12207,7 @@ var webSearchPreview = createProviderDefinedToolFactory({
  name: "web_search_preview",
  inputSchema: z$1.object({})
  });
- function prepareTools8({
+ function prepareChatTools({
  tools,
  toolChoice,
  structuredOutputs,
@@ -12110,14 +12374,17 @@ var OpenAIChatLanguageModel2 = class {
  } : { type: "json_object" } : void 0,
  stop: stopSequences,
  seed,
+ verbosity: openaiOptions.textVerbosity,
  // openai specific settings:
- // TODO remove in next major version; we auto-map maxOutputTokens now
+ // TODO AI SDK 6: remove, we auto-map maxOutputTokens now
  max_completion_tokens: openaiOptions.maxCompletionTokens,
  store: openaiOptions.store,
  metadata: openaiOptions.metadata,
  prediction: openaiOptions.prediction,
  reasoning_effort: openaiOptions.reasoningEffort,
  service_tier: openaiOptions.serviceTier,
+ prompt_cache_key: openaiOptions.promptCacheKey,
+ safety_identifier: openaiOptions.safetyIdentifier,
  // messages:
  messages
  };
@@ -12195,7 +12462,7 @@ var OpenAIChatLanguageModel2 = class {
  warnings.push({
  type: "unsupported-setting",
  setting: "serviceTier",
- details: "flex processing is only available for o3 and o4-mini models"
+ details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
  });
  baseArgs.service_tier = void 0;
  }
@@ -12203,7 +12470,7 @@ var OpenAIChatLanguageModel2 = class {
  warnings.push({
  type: "unsupported-setting",
  setting: "serviceTier",
- details: "priority processing is only available for supported models (GPT-4, o3, o4-mini) and requires Enterprise access"
+ details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
  });
  baseArgs.service_tier = void 0;
  }
@@ -12211,7 +12478,7 @@ var OpenAIChatLanguageModel2 = class {
  tools: openaiTools22,
  toolChoice: openaiToolChoice,
  toolWarnings
- } = prepareTools8({
+ } = prepareChatTools({
  tools,
  toolChoice,
  structuredOutputs,
@@ -12227,7 +12494,7 @@ var OpenAIChatLanguageModel2 = class {
  };
  }
  async doGenerate(options) {
- var _a16, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
+ var _a16, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
  const { args: body, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
@@ -12261,8 +12528,17 @@ var OpenAIChatLanguageModel2 = class {
  input: toolCall.function.arguments
  });
  }
- const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
- const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
+ for (const annotation of (_c = choice.message.annotations) != null ? _c : []) {
+ content.push({
+ type: "source",
+ sourceType: "url",
+ id: generateId2(),
+ url: annotation.url,
+ title: annotation.title
+ });
+ }
+ const completionTokenDetails = (_d = response.usage) == null ? void 0 : _d.completion_tokens_details;
+ const promptTokenDetails = (_e = response.usage) == null ? void 0 : _e.prompt_tokens_details;
  const providerMetadata = { openai: {} };
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
  providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
@@ -12270,18 +12546,18 @@ var OpenAIChatLanguageModel2 = class {
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
  providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
  }
- if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+ if (((_f = choice.logprobs) == null ? void 0 : _f.content) != null) {
  providerMetadata.openai.logprobs = choice.logprobs.content;
  }
  return {
  content,
  finishReason: mapOpenAIFinishReason2(choice.finish_reason),
  usage: {
- inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
- outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
- totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
- reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
- cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
+ inputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.prompt_tokens) != null ? _h : void 0,
+ outputTokens: (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens) != null ? _j : void 0,
+ totalTokens: (_l = (_k = response.usage) == null ? void 0 : _k.total_tokens) != null ? _l : void 0,
+ reasoningTokens: (_m = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _m : void 0,
+ cachedInputTokens: (_n = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _n : void 0
  },
  request: { body },
  response: {
@@ -12478,6 +12754,17 @@ var OpenAIChatLanguageModel2 = class {
  }
  }
  }
+ if (delta.annotations != null) {
+ for (const annotation of delta.annotations) {
+ controller.enqueue({
+ type: "source",
+ sourceType: "url",
+ id: generateId2(),
+ url: annotation.url,
+ title: annotation.title
+ });
+ }
+ }
  },
  flush(controller) {
  if (isActiveText) {
@@ -12528,6 +12815,15 @@ var openaiChatResponseSchema2 = z$1.object({
  arguments: z$1.string()
  })
  })
+ ).nullish(),
+ annotations: z$1.array(
+ z$1.object({
+ type: z$1.literal("url_citation"),
+ start_index: z$1.number(),
+ end_index: z$1.number(),
+ url: z$1.string(),
+ title: z$1.string()
+ })
  ).nullish()
  }),
  index: z$1.number(),
@@ -12570,6 +12866,15 @@ var openaiChatChunkSchema2 = z$1.union([
  arguments: z$1.string().nullish()
  })
  })
+ ).nullish(),
+ annotations: z$1.array(
+ z$1.object({
+ type: z$1.literal("url_citation"),
+ start_index: z$1.number(),
+ end_index: z$1.number(),
+ url: z$1.string(),
+ title: z$1.string()
+ })
  ).nullish()
  }).nullish(),
  logprobs: z$1.object({
@@ -12595,13 +12900,13 @@ var openaiChatChunkSchema2 = z$1.union([
  openaiErrorDataSchema2
  ]);
  function isReasoningModel2(modelId) {
- return modelId.startsWith("o");
+ return (modelId.startsWith("o") || modelId.startsWith("gpt-5")) && !modelId.startsWith("gpt-5-chat");
  }
  function supportsFlexProcessing(modelId) {
- return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+ return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
  }
  function supportsPriorityProcessing(modelId) {
- return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+ return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
  }
  function getSystemMessageMode2(modelId) {
  var _a16, _b;
@@ -12714,6 +13019,32 @@ ${assistantMessage}
  ${user}:`]
  };
  }
+ function getResponseMetadata22({
+ id,
+ model,
+ created
+ }) {
+ return {
+ id: id != null ? id : void 0,
+ modelId: model != null ? model : void 0,
+ timestamp: created != null ? new Date(created * 1e3) : void 0
+ };
+ }
+ function mapOpenAIFinishReason22(finishReason) {
+ switch (finishReason) {
+ case "stop":
+ return "stop";
+ case "length":
+ return "length";
+ case "content_filter":
+ return "content-filter";
+ case "function_call":
+ case "tool_calls":
+ return "tool-calls";
+ default:
+ return "unknown";
+ }
+ }
  var openaiCompletionProviderOptions = z$1.object({
  /**
  Echo back the prompt in addition to the completion.
@@ -12873,10 +13204,10 @@ var OpenAICompletionLanguageModel2 = class {
  outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
  totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
  },
- finishReason: mapOpenAIFinishReason2(choice.finish_reason),
+ finishReason: mapOpenAIFinishReason22(choice.finish_reason),
  request: { body: args },
  response: {
- ...getResponseMetadata4(response),
+ ...getResponseMetadata22(response),
  headers: responseHeaders,
  body: rawResponse
  },
@@ -12940,7 +13271,7 @@ var OpenAICompletionLanguageModel2 = class {
  isFirstChunk = false;
  controller.enqueue({
  type: "response-metadata",
- ...getResponseMetadata4(value)
+ ...getResponseMetadata22(value)
  });
  controller.enqueue({ type: "text-start", id: "0" });
  }
@@ -12951,7 +13282,7 @@ var OpenAICompletionLanguageModel2 = class {
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
- finishReason = mapOpenAIFinishReason2(choice.finish_reason);
+ finishReason = mapOpenAIFinishReason22(choice.finish_reason);
  }
  if ((choice == null ? void 0 : choice.logprobs) != null) {
  providerMetadata.openai.logprobs = choice.logprobs;
@@ -13191,193 +13522,32 @@ var openaiImageResponseSchema2 = z$1.object({
  z$1.object({ b64_json: z$1.string(), revised_prompt: z$1.string().optional() })
  )
  });
+ var codeInterpreterArgsSchema = z$1.object({
+ container: z$1.union([
+ z$1.string(),
+ z$1.object({
+ fileIds: z$1.array(z$1.string()).optional()
+ })
+ ]).optional()
+ });
+ var codeInterpreter = createProviderDefinedToolFactory({
+ id: "openai.code_interpreter",
+ name: "code_interpreter",
+ inputSchema: z$1.object({})
+ });
  var openaiTools2 = {
+ codeInterpreter,
  fileSearch,
  webSearchPreview
  };
- var openAITranscriptionProviderOptions = z$1.object({
- /**
- * Additional information to include in the transcription response.
- */
- include: z$1.array(z$1.string()).optional(),
- /**
- * The language of the input audio in ISO-639-1 format.
- */
- language: z$1.string().optional(),
- /**
- * An optional text to guide the model's style or continue a previous audio segment.
- */
- prompt: z$1.string().optional(),
- /**
- * The sampling temperature, between 0 and 1.
- * @default 0
- */
- temperature: z$1.number().min(0).max(1).default(0).optional(),
- /**
- * The timestamp granularities to populate for this transcription.
- * @default ['segment']
- */
- timestampGranularities: z$1.array(z$1.enum(["word", "segment"])).default(["segment"]).optional()
- });
- var languageMap2 = {
- afrikaans: "af",
- arabic: "ar",
- armenian: "hy",
- azerbaijani: "az",
- belarusian: "be",
- bosnian: "bs",
- bulgarian: "bg",
- catalan: "ca",
- chinese: "zh",
- croatian: "hr",
- czech: "cs",
- danish: "da",
- dutch: "nl",
- english: "en",
- estonian: "et",
- finnish: "fi",
- french: "fr",
- galician: "gl",
- german: "de",
- greek: "el",
- hebrew: "he",
- hindi: "hi",
- hungarian: "hu",
- icelandic: "is",
- indonesian: "id",
- italian: "it",
- japanese: "ja",
- kannada: "kn",
- kazakh: "kk",
- korean: "ko",
- latvian: "lv",
- lithuanian: "lt",
- macedonian: "mk",
- malay: "ms",
- marathi: "mr",
- maori: "mi",
- nepali: "ne",
- norwegian: "no",
- persian: "fa",
- polish: "pl",
- portuguese: "pt",
- romanian: "ro",
- russian: "ru",
- serbian: "sr",
- slovak: "sk",
- slovenian: "sl",
- spanish: "es",
- swahili: "sw",
- swedish: "sv",
- tagalog: "tl",
- tamil: "ta",
- thai: "th",
- turkish: "tr",
- ukrainian: "uk",
- urdu: "ur",
- vietnamese: "vi",
- welsh: "cy"
- };
- var OpenAITranscriptionModel2 = class {
- constructor(modelId, config) {
- this.modelId = modelId;
- this.config = config;
- this.specificationVersion = "v2";
- }
- get provider() {
- return this.config.provider;
- }
- async getArgs({
- audio,
- mediaType,
- providerOptions
- }) {
- const warnings = [];
- const openAIOptions = await parseProviderOptions2({
- provider: "openai",
- providerOptions,
- schema: openAITranscriptionProviderOptions
- });
- const formData = new FormData();
- const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([convertBase64ToUint8Array2(audio)]);
- formData.append("model", this.modelId);
- formData.append("file", new File([blob], "audio", { type: mediaType }));
- if (openAIOptions) {
- const transcriptionModelOptions = {
- include: openAIOptions.include,
- language: openAIOptions.language,
- prompt: openAIOptions.prompt,
- temperature: openAIOptions.temperature,
- timestamp_granularities: openAIOptions.timestampGranularities
- };
- for (const [key, value] of Object.entries(transcriptionModelOptions)) {
- if (value != null) {
- formData.append(key, String(value));
- }
- }
- }
- return {
- formData,
- warnings
- };
- }
- async doGenerate(options) {
- var _a16, _b, _c, _d, _e, _f;
- const currentDate = (_c = (_b = (_a16 = this.config._internal) == null ? void 0 : _a16.currentDate) == null ? void 0 : _b.call(_a16)) != null ? _c : /* @__PURE__ */ new Date();
- const { formData, warnings } = await this.getArgs(options);
- const {
- value: response,
- responseHeaders,
- rawValue: rawResponse
- } = await postFormDataToApi2({
- url: this.config.url({
- path: "/audio/transcriptions",
- modelId: this.modelId
- }),
- headers: combineHeaders2(this.config.headers(), options.headers),
- formData,
- failedResponseHandler: openaiFailedResponseHandler2,
- successfulResponseHandler: createJsonResponseHandler2(
- openaiTranscriptionResponseSchema2
- ),
- abortSignal: options.abortSignal,
- fetch: this.config.fetch
- });
- const language = response.language != null && response.language in languageMap2 ? languageMap2[response.language] : void 0;
- return {
- text: response.text,
- segments: (_e = (_d = response.words) == null ? void 0 : _d.map((word) => ({
- text: word.word,
- startSecond: word.start,
- endSecond: word.end
- }))) != null ? _e : [],
- language,
- durationInSeconds: (_f = response.duration) != null ? _f : void 0,
- warnings,
- response: {
- timestamp: currentDate,
- modelId: this.modelId,
- headers: responseHeaders,
- body: rawResponse
- }
- };
- }
- };
- var openaiTranscriptionResponseSchema2 = z$1.object({
- text: z$1.string(),
- language: z$1.string().nullish(),
- duration: z$1.number().nullish(),
- words: z$1.array(
- z$1.object({
- word: z$1.string(),
- start: z$1.number(),
- end: z$1.number()
- })
- ).nullish()
- });
+ function isFileId(data, prefixes) {
+ if (!prefixes) return false;
+ return prefixes.some((prefix) => data.startsWith(prefix));
+ }
  async function convertToOpenAIResponsesMessages2({
  prompt,
- systemMessageMode
+ systemMessageMode,
+ fileIdPrefixes
  }) {
  var _a16, _b, _c, _d, _e, _f;
  const messages = [];
@@ -13424,8 +13594,9 @@ async function convertToOpenAIResponsesMessages2({
  const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
  return {
  type: "input_image",
- image_url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
- // OpenAI specific extension: image detail
+ ...part.data instanceof URL ? { image_url: part.data.toString() } : typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
+ image_url: `data:${mediaType};base64,${convertToBase64(part.data)}`
+ },
  detail: (_b2 = (_a23 = part.providerOptions) == null ? void 0 : _a23.openai) == null ? void 0 : _b2.imageDetail
  };
  } else if (part.mediaType === "application/pdf") {
@@ -13436,8 +13607,10 @@ async function convertToOpenAIResponsesMessages2({
  }
  return {
  type: "input_file",
- filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
- file_data: `data:application/pdf;base64,${part.data}`
+ ...typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
+ filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
+ file_data: `data:application/pdf;base64,${convertToBase64(part.data)}`
+ }
  };
  } else {
  throw new UnsupportedFunctionalityError2({
@@ -13596,7 +13769,7 @@ function prepareResponsesTools2({
  strict: strictJsonSchema
  });
  break;
- case "provider-defined":
+ case "provider-defined": {
  switch (tool2.id) {
  case "openai.file_search": {
  const args = fileSearchArgsSchema.parse(tool2.args);
@@ -13609,18 +13782,30 @@ function prepareResponsesTools2({
  });
  break;
  }
- case "openai.web_search_preview":
+ case "openai.web_search_preview": {
+ const args = webSearchPreviewArgsSchema.parse(tool2.args);
  openaiTools22.push({
  type: "web_search_preview",
- search_context_size: tool2.args.searchContextSize,
- user_location: tool2.args.userLocation
+ search_context_size: args.searchContextSize,
+ user_location: args.userLocation
  });
  break;
- default:
+ }
+ case "openai.code_interpreter": {
+ const args = codeInterpreterArgsSchema.parse(tool2.args);
+ openaiTools22.push({
+ type: "code_interpreter",
+ container: args.container == null ? { type: "auto", file_ids: void 0 } : typeof args.container === "string" ? args.container : { type: "auto", file_ids: args.container.fileIds }
+ });
+ break;
+ }
+ default: {
  toolWarnings.push({ type: "unsupported-tool", tool: tool2 });
  break;
+ }
  }
  break;
+ }
  default:
  toolWarnings.push({ type: "unsupported-tool", tool: tool2 });
  break;
@@ -13638,7 +13823,7 @@ function prepareResponsesTools2({
  case "tool":
  return {
  tools: openaiTools22,
- toolChoice: toolChoice.toolName === "file_search" ? { type: "file_search" } : toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
+ toolChoice: toolChoice.toolName === "code_interpreter" || toolChoice.toolName === "file_search" || toolChoice.toolName === "web_search_preview" ? { type: toolChoice.toolName } : { type: "function", name: toolChoice.toolName },
  toolWarnings
  };
  default: {
@@ -13649,6 +13834,19 @@ function prepareResponsesTools2({
  }
  }
  }
+ var TOP_LOGPROBS_MAX = 20;
+ var LOGPROBS_SCHEMA = z$1.array(
+ z$1.object({
+ token: z$1.string(),
+ logprob: z$1.number(),
+ top_logprobs: z$1.array(
+ z$1.object({
+ token: z$1.string(),
+ logprob: z$1.number()
+ })
+ )
+ })
+ );
  var OpenAIResponsesLanguageModel2 = class {
  constructor(modelId, config) {
  this.specificationVersion = "v2";
@@ -13702,7 +13900,8 @@ var OpenAIResponsesLanguageModel2 = class {
  }
  const { messages, warnings: messageWarnings } = await convertToOpenAIResponsesMessages2({
  prompt,
- systemMessageMode: modelConfig.systemMessageMode
+ systemMessageMode: modelConfig.systemMessageMode,
+ fileIdPrefixes: this.config.fileIdPrefixes
  });
  warnings.push(...messageWarnings);
  const openaiOptions = await parseProviderOptions2({
@@ -13711,21 +13910,28 @@ var OpenAIResponsesLanguageModel2 = class {
  schema: openaiResponsesProviderOptionsSchema2
  });
  const strictJsonSchema = (_a16 = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a16 : false;
+ const topLogprobs = typeof (openaiOptions == null ? void 0 : openaiOptions.logprobs) === "number" ? openaiOptions == null ? void 0 : openaiOptions.logprobs : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? TOP_LOGPROBS_MAX : void 0;
+ const openaiOptionsInclude = topLogprobs ? Array.isArray(openaiOptions == null ? void 0 : openaiOptions.include) ? [...openaiOptions == null ? void 0 : openaiOptions.include, "message.output_text.logprobs"] : ["message.output_text.logprobs"] : openaiOptions == null ? void 0 : openaiOptions.include;
  const baseArgs = {
  model: this.modelId,
  input: messages,
  temperature,
  top_p: topP,
  max_output_tokens: maxOutputTokens,
- ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
+ ...((responseFormat == null ? void 0 : responseFormat.type) === "json" || (openaiOptions == null ? void 0 : openaiOptions.textVerbosity)) && {
  text: {
- format: responseFormat.schema != null ? {
- type: "json_schema",
- strict: strictJsonSchema,
- name: (_b = responseFormat.name) != null ? _b : "response",
- description: responseFormat.description,
- schema: responseFormat.schema
- } : { type: "json_object" }
+ ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
+ format: responseFormat.schema != null ? {
+ type: "json_schema",
+ strict: strictJsonSchema,
+ name: (_b = responseFormat.name) != null ? _b : "response",
+ description: responseFormat.description,
+ schema: responseFormat.schema
+ } : { type: "json_object" }
+ },
+ ...(openaiOptions == null ? void 0 : openaiOptions.textVerbosity) && {
+ verbosity: openaiOptions.textVerbosity
+ }
  }
  },
  // provider options:
@@ -13736,7 +13942,10 @@ var OpenAIResponsesLanguageModel2 = class {
  user: openaiOptions == null ? void 0 : openaiOptions.user,
  instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
  service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
- include: openaiOptions == null ? void 0 : openaiOptions.include,
+ include: openaiOptionsInclude,
+ prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
+ safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
+ top_logprobs: topLogprobs,
  // model-specific settings:
  ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
  reasoning: {
@@ -13785,19 +13994,19 @@ var OpenAIResponsesLanguageModel2 = class {
  });
  }
  }
- if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !supportsFlexProcessing2(this.modelId)) {
+ if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !modelConfig.supportsFlexProcessing) {
  warnings.push({
  type: "unsupported-setting",
  setting: "serviceTier",
- details: "flex processing is only available for o3 and o4-mini models"
+ details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
  });
  delete baseArgs.service_tier;
  }
- if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !supportsPriorityProcessing2(this.modelId)) {
+ if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !modelConfig.supportsPriorityProcessing) {
  warnings.push({
  type: "unsupported-setting",
  setting: "serviceTier",
- details: "priority processing is only available for supported models (GPT-4, o3, o4-mini) and requires Enterprise access"
+ details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
  });
  delete baseArgs.service_tier;
  }
@@ -13820,7 +14029,7 @@ var OpenAIResponsesLanguageModel2 = class {
  };
  }
  async doGenerate(options) {
- var _a16, _b, _c, _d, _e, _f, _g, _h, _i;
+ var _a16, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q;
  const { args: body, warnings } = await this.getArgs(options);
  const url = this.config.url({
  path: "/responses",
@@ -13854,14 +14063,24 @@ var OpenAIResponsesLanguageModel2 = class {
13854
14063
  z$1.object({
13855
14064
  type: z$1.literal("output_text"),
13856
14065
  text: z$1.string(),
14066
+ logprobs: LOGPROBS_SCHEMA.nullish(),
13857
14067
  annotations: z$1.array(
13858
- z$1.object({
13859
- type: z$1.literal("url_citation"),
13860
- start_index: z$1.number(),
13861
- end_index: z$1.number(),
13862
- url: z$1.string(),
13863
- title: z$1.string()
13864
- })
14068
+ z$1.discriminatedUnion("type", [
14069
+ z$1.object({
14070
+ type: z$1.literal("url_citation"),
14071
+ start_index: z$1.number(),
14072
+ end_index: z$1.number(),
14073
+ url: z$1.string(),
14074
+ title: z$1.string()
14075
+ }),
14076
+ z$1.object({
14077
+ type: z$1.literal("file_citation"),
14078
+ start_index: z$1.number(),
14079
+ end_index: z$1.number(),
14080
+ file_id: z$1.string(),
14081
+ quote: z$1.string()
14082
+ })
14083
+ ])
13865
14084
  )
13866
14085
  })
13867
14086
  )
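The annotation schema is widened from a single `url_citation` object to a discriminated union that also admits `file_citation`. Sketch of annotation payloads accepted by the new union (field shapes from the schema above; values hypothetical):

    const urlCitation = {
      type: "url_citation",
      start_index: 0,
      end_index: 18,
      url: "https://example.com/source",
      title: "Example source",
    };

    const fileCitation = {
      type: "file_citation",
      start_index: 0,
      end_index: 18,
      file_id: "file-abc123",
      quote: "the cited passage",
    };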
@@ -13876,13 +14095,33 @@ var OpenAIResponsesLanguageModel2 = class {
13876
14095
  z$1.object({
13877
14096
  type: z$1.literal("web_search_call"),
13878
14097
  id: z$1.string(),
13879
- status: z$1.string().optional()
14098
+ status: z$1.string().optional(),
14099
+ action: z$1.object({
14100
+ type: z$1.literal("search"),
14101
+ query: z$1.string().optional()
14102
+ }).nullish()
13880
14103
  }),
13881
14104
  z$1.object({
13882
14105
  type: z$1.literal("computer_call"),
13883
14106
  id: z$1.string(),
13884
14107
  status: z$1.string().optional()
13885
14108
  }),
14109
+ z$1.object({
14110
+ type: z$1.literal("file_search_call"),
14111
+ id: z$1.string(),
14112
+ status: z$1.string().optional(),
14113
+ queries: z$1.array(z$1.string()).nullish(),
14114
+ results: z$1.array(
14115
+ z$1.object({
14116
+ attributes: z$1.object({
14117
+ file_id: z$1.string(),
14118
+ filename: z$1.string(),
14119
+ score: z$1.number(),
14120
+ text: z$1.string()
14121
+ })
14122
+ })
14123
+ ).nullish()
14124
+ }),
13886
14125
  z$1.object({
13887
14126
  type: z$1.literal("reasoning"),
13888
14127
  id: z$1.string(),
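The response-output union also gains a `file_search_call` variant, and `web_search_call` picks up an optional `action` object carrying the search query. Sketch of a `file_search_call` output item that satisfies the new schema (values hypothetical):

    const fileSearchCall = {
      type: "file_search_call",
      id: "fs_001",
      status: "completed",
      queries: ["quarterly revenue"],
      results: [
        {
          attributes: {
            file_id: "file-abc123",
            filename: "q3-report.pdf",
            score: 0.92,
            text: "Revenue grew 12% quarter over quarter...",
          },
        },
      ],
    };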
@@ -13915,6 +14154,7 @@ var OpenAIResponsesLanguageModel2 = class {
13915
14154
  });
13916
14155
  }
13917
14156
  const content = [];
14157
+ const logprobs = [];
13918
14158
  for (const part of response.output) {
13919
14159
  switch (part.type) {
13920
14160
  case "reasoning": {
@@ -13937,6 +14177,9 @@ var OpenAIResponsesLanguageModel2 = class {
13937
14177
  }
13938
14178
  case "message": {
13939
14179
  for (const contentPart of part.content) {
14180
+ if (((_c = (_b = options.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.logprobs) && contentPart.logprobs) {
14181
+ logprobs.push(contentPart.logprobs);
14182
+ }
13940
14183
  content.push({
13941
14184
  type: "text",
13942
14185
  text: contentPart.text,
@@ -13947,13 +14190,24 @@ var OpenAIResponsesLanguageModel2 = class {
13947
14190
  }
13948
14191
  });
13949
14192
  for (const annotation of contentPart.annotations) {
13950
- content.push({
13951
- type: "source",
13952
- sourceType: "url",
13953
- id: (_d = (_c = (_b = this.config).generateId) == null ? void 0 : _c.call(_b)) != null ? _d : generateId2(),
13954
- url: annotation.url,
13955
- title: annotation.title
13956
- });
14193
+ if (annotation.type === "url_citation") {
14194
+ content.push({
14195
+ type: "source",
14196
+ sourceType: "url",
14197
+ id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : generateId2(),
14198
+ url: annotation.url,
14199
+ title: annotation.title
14200
+ });
14201
+ } else if (annotation.type === "file_citation") {
14202
+ content.push({
14203
+ type: "source",
14204
+ sourceType: "document",
14205
+ id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : generateId2(),
14206
+ mediaType: "text/plain",
14207
+ title: annotation.quote,
14208
+ filename: annotation.file_id
14209
+ });
14210
+ }
13957
14211
  }
13958
14212
  }
13959
14213
  break;
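Per the mapping above, `url_citation` annotations still become `url` sources, while `file_citation` annotations now become `document` sources, reusing the quote as the title and the file id as the filename. Sketch of the emitted content part (the id comes from `config.generateId` or the fallback generator):

    const documentSource = {
      type: "source",
      sourceType: "document",
      id: "gen_abc123",           // generated
      mediaType: "text/plain",
      title: "the cited passage", // annotation.quote
      filename: "file-abc123",    // annotation.file_id
    };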
@@ -13977,14 +14231,17 @@ var OpenAIResponsesLanguageModel2 = class {
13977
14231
  type: "tool-call",
13978
14232
  toolCallId: part.id,
13979
14233
  toolName: "web_search_preview",
13980
- input: "",
14234
+ input: (_k = (_j = part.action) == null ? void 0 : _j.query) != null ? _k : "",
13981
14235
  providerExecuted: true
13982
14236
  });
13983
14237
  content.push({
13984
14238
  type: "tool-result",
13985
14239
  toolCallId: part.id,
13986
14240
  toolName: "web_search_preview",
13987
- result: { status: part.status || "completed" },
14241
+ result: {
14242
+ status: part.status || "completed",
14243
+ ...((_l = part.action) == null ? void 0 : _l.query) && { query: part.action.query }
14244
+ },
13988
14245
  providerExecuted: true
13989
14246
  });
13990
14247
  break;
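With `action.query` threaded through (here and again in the streaming path below), web-search tool parts now carry the actual query instead of an empty input. Sketch of the resulting parts (values hypothetical):

    const toolCall = {
      type: "tool-call",
      toolCallId: "ws_001",
      toolName: "web_search_preview",
      input: "mastra server release notes", // was always "" before
      providerExecuted: true,
    };

    const toolResult = {
      type: "tool-result",
      toolCallId: "ws_001",
      toolName: "web_search_preview",
      result: { status: "completed", query: "mastra server release notes" },
      providerExecuted: true,
    };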
@@ -14009,20 +14266,48 @@ var OpenAIResponsesLanguageModel2 = class {
14009
14266
  });
14010
14267
  break;
14011
14268
  }
14269
+ case "file_search_call": {
14270
+ content.push({
14271
+ type: "tool-call",
14272
+ toolCallId: part.id,
14273
+ toolName: "file_search",
14274
+ input: "",
14275
+ providerExecuted: true
14276
+ });
14277
+ content.push({
14278
+ type: "tool-result",
14279
+ toolCallId: part.id,
14280
+ toolName: "file_search",
14281
+ result: {
14282
+ type: "file_search_tool_result",
14283
+ status: part.status || "completed",
14284
+ ...part.queries && { queries: part.queries },
14285
+ ...part.results && { results: part.results }
14286
+ },
14287
+ providerExecuted: true
14288
+ });
14289
+ break;
14290
+ }
14012
14291
  }
14013
14292
  }
14293
+ const providerMetadata = {
14294
+ openai: { responseId: response.id }
14295
+ };
14296
+ if (logprobs.length > 0) {
14297
+ providerMetadata.openai.logprobs = logprobs;
14298
+ }
14014
14299
  return {
14015
14300
  content,
14016
14301
  finishReason: mapOpenAIResponseFinishReason2({
14017
- finishReason: (_e = response.incomplete_details) == null ? void 0 : _e.reason,
14302
+ finishReason: (_m = response.incomplete_details) == null ? void 0 : _m.reason,
14018
14303
  hasToolCalls: content.some((part) => part.type === "tool-call")
14019
14304
  }),
14020
14305
  usage: {
14021
14306
  inputTokens: response.usage.input_tokens,
14022
14307
  outputTokens: response.usage.output_tokens,
14023
14308
  totalTokens: response.usage.input_tokens + response.usage.output_tokens,
14024
- reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : void 0,
14025
- cachedInputTokens: (_i = (_h = response.usage.input_tokens_details) == null ? void 0 : _h.cached_tokens) != null ? _i : void 0
14309
+ reasoningTokens: (_o = (_n = response.usage.output_tokens_details) == null ? void 0 : _n.reasoning_tokens) != null ? _o : void 0,
14310
+ cachedInputTokens: (_q = (_p = response.usage.input_tokens_details) == null ? void 0 : _p.cached_tokens) != null ? _q : void 0
14026
14311
  },
14027
14312
  request: { body },
14028
14313
  response: {
@@ -14032,11 +14317,7 @@ var OpenAIResponsesLanguageModel2 = class {
14032
14317
  headers: responseHeaders,
14033
14318
  body: rawResponse
14034
14319
  },
14035
- providerMetadata: {
14036
- openai: {
14037
- responseId: response.id
14038
- }
14039
- },
14320
+ providerMetadata,
14040
14321
  warnings
14041
14322
  };
14042
14323
  }
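`providerMetadata` is now built up-front so the collected logprobs can ride alongside `responseId`; the stream's `flush` handler below gets the same treatment. Sketch of consuming the enriched metadata (`result` stands for the `doGenerate` return value; `logprobs` is only present when the `logprobs` provider option was set):

    function readOpenAIMetadata(result) {
      const meta = result.providerMetadata.openai;
      console.log(meta.responseId);
      return meta.logprobs ?? []; // one entry per output_text content part
    }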
@@ -14066,6 +14347,7 @@ var OpenAIResponsesLanguageModel2 = class {
14066
14347
  outputTokens: void 0,
14067
14348
  totalTokens: void 0
14068
14349
  };
14350
+ const logprobs = [];
14069
14351
  let responseId = null;
14070
14352
  const ongoingToolCalls = {};
14071
14353
  let hasToolCalls = false;
@@ -14077,7 +14359,7 @@ var OpenAIResponsesLanguageModel2 = class {
14077
14359
  controller.enqueue({ type: "stream-start", warnings });
14078
14360
  },
14079
14361
  transform(chunk, controller) {
14080
- var _a16, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
14362
+ var _a16, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s;
14081
14363
  if (options.includeRawChunks) {
14082
14364
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
14083
14365
  }
@@ -14118,6 +14400,16 @@ var OpenAIResponsesLanguageModel2 = class {
14118
14400
  id: value.item.id,
14119
14401
  toolName: "computer_use"
14120
14402
  });
14403
+ } else if (value.item.type === "file_search_call") {
14404
+ ongoingToolCalls[value.output_index] = {
14405
+ toolName: "file_search",
14406
+ toolCallId: value.item.id
14407
+ };
14408
+ controller.enqueue({
14409
+ type: "tool-input-start",
14410
+ id: value.item.id,
14411
+ toolName: "file_search"
14412
+ });
14121
14413
  } else if (value.item.type === "message") {
14122
14414
  controller.enqueue({
14123
14415
  type: "text-start",
@@ -14174,7 +14466,7 @@ var OpenAIResponsesLanguageModel2 = class {
14174
14466
  type: "tool-call",
14175
14467
  toolCallId: value.item.id,
14176
14468
  toolName: "web_search_preview",
14177
- input: "",
14469
+ input: (_c = (_b = value.item.action) == null ? void 0 : _b.query) != null ? _c : "",
14178
14470
  providerExecuted: true
14179
14471
  });
14180
14472
  controller.enqueue({
@@ -14183,7 +14475,10 @@ var OpenAIResponsesLanguageModel2 = class {
14183
14475
  toolName: "web_search_preview",
14184
14476
  result: {
14185
14477
  type: "web_search_tool_result",
14186
- status: value.item.status || "completed"
14478
+ status: value.item.status || "completed",
14479
+ ...((_d = value.item.action) == null ? void 0 : _d.query) && {
14480
+ query: value.item.action.query
14481
+ }
14187
14482
  },
14188
14483
  providerExecuted: true
14189
14484
  });
@@ -14211,6 +14506,32 @@ var OpenAIResponsesLanguageModel2 = class {
14211
14506
  },
14212
14507
  providerExecuted: true
14213
14508
  });
14509
+ } else if (value.item.type === "file_search_call") {
14510
+ ongoingToolCalls[value.output_index] = void 0;
14511
+ hasToolCalls = true;
14512
+ controller.enqueue({
14513
+ type: "tool-input-end",
14514
+ id: value.item.id
14515
+ });
14516
+ controller.enqueue({
14517
+ type: "tool-call",
14518
+ toolCallId: value.item.id,
14519
+ toolName: "file_search",
14520
+ input: "",
14521
+ providerExecuted: true
14522
+ });
14523
+ controller.enqueue({
14524
+ type: "tool-result",
14525
+ toolCallId: value.item.id,
14526
+ toolName: "file_search",
14527
+ result: {
14528
+ type: "file_search_tool_result",
14529
+ status: value.item.status || "completed",
14530
+ ...value.item.queries && { queries: value.item.queries },
14531
+ ...value.item.results && { results: value.item.results }
14532
+ },
14533
+ providerExecuted: true
14534
+ });
14214
14535
  } else if (value.item.type === "message") {
14215
14536
  controller.enqueue({
14216
14537
  type: "text-end",
@@ -14225,7 +14546,7 @@ var OpenAIResponsesLanguageModel2 = class {
14225
14546
  providerMetadata: {
14226
14547
  openai: {
14227
14548
  itemId: value.item.id,
14228
- reasoningEncryptedContent: (_b = value.item.encrypted_content) != null ? _b : null
14549
+ reasoningEncryptedContent: (_e = value.item.encrypted_content) != null ? _e : null
14229
14550
  }
14230
14551
  }
14231
14552
  });
@@ -14255,9 +14576,12 @@ var OpenAIResponsesLanguageModel2 = class {
14255
14576
  id: value.item_id,
14256
14577
  delta: value.delta
14257
14578
  });
14579
+ if (value.logprobs) {
14580
+ logprobs.push(value.logprobs);
14581
+ }
14258
14582
  } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
14259
14583
  if (value.summary_index > 0) {
14260
- (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
14584
+ (_f = activeReasoning[value.item_id]) == null ? void 0 : _f.summaryParts.push(
14261
14585
  value.summary_index
14262
14586
  );
14263
14587
  controller.enqueue({
@@ -14266,7 +14590,7 @@ var OpenAIResponsesLanguageModel2 = class {
14266
14590
  providerMetadata: {
14267
14591
  openai: {
14268
14592
  itemId: value.item_id,
14269
- reasoningEncryptedContent: (_e = (_d = activeReasoning[value.item_id]) == null ? void 0 : _d.encryptedContent) != null ? _e : null
14593
+ reasoningEncryptedContent: (_h = (_g = activeReasoning[value.item_id]) == null ? void 0 : _g.encryptedContent) != null ? _h : null
14270
14594
  }
14271
14595
  }
14272
14596
  });
@@ -14284,36 +14608,51 @@ var OpenAIResponsesLanguageModel2 = class {
14284
14608
  });
14285
14609
  } else if (isResponseFinishedChunk2(value)) {
14286
14610
  finishReason = mapOpenAIResponseFinishReason2({
14287
- finishReason: (_f = value.response.incomplete_details) == null ? void 0 : _f.reason,
14611
+ finishReason: (_i = value.response.incomplete_details) == null ? void 0 : _i.reason,
14288
14612
  hasToolCalls
14289
14613
  });
14290
14614
  usage.inputTokens = value.response.usage.input_tokens;
14291
14615
  usage.outputTokens = value.response.usage.output_tokens;
14292
14616
  usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
14293
- usage.reasoningTokens = (_h = (_g = value.response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : void 0;
14294
- usage.cachedInputTokens = (_j = (_i = value.response.usage.input_tokens_details) == null ? void 0 : _i.cached_tokens) != null ? _j : void 0;
14617
+ usage.reasoningTokens = (_k = (_j = value.response.usage.output_tokens_details) == null ? void 0 : _j.reasoning_tokens) != null ? _k : void 0;
14618
+ usage.cachedInputTokens = (_m = (_l = value.response.usage.input_tokens_details) == null ? void 0 : _l.cached_tokens) != null ? _m : void 0;
14295
14619
  } else if (isResponseAnnotationAddedChunk2(value)) {
14296
- controller.enqueue({
14297
- type: "source",
14298
- sourceType: "url",
14299
- id: (_m = (_l = (_k = self.config).generateId) == null ? void 0 : _l.call(_k)) != null ? _m : generateId2(),
14300
- url: value.annotation.url,
14301
- title: value.annotation.title
14302
- });
14620
+ if (value.annotation.type === "url_citation") {
14621
+ controller.enqueue({
14622
+ type: "source",
14623
+ sourceType: "url",
14624
+ id: (_p = (_o = (_n = self.config).generateId) == null ? void 0 : _o.call(_n)) != null ? _p : generateId2(),
14625
+ url: value.annotation.url,
14626
+ title: value.annotation.title
14627
+ });
14628
+ } else if (value.annotation.type === "file_citation") {
14629
+ controller.enqueue({
14630
+ type: "source",
14631
+ sourceType: "document",
14632
+ id: (_s = (_r = (_q = self.config).generateId) == null ? void 0 : _r.call(_q)) != null ? _s : generateId2(),
14633
+ mediaType: "text/plain",
14634
+ title: value.annotation.quote,
14635
+ filename: value.annotation.file_id
14636
+ });
14637
+ }
14303
14638
  } else if (isErrorChunk2(value)) {
14304
14639
  controller.enqueue({ type: "error", error: value });
14305
14640
  }
14306
14641
  },
14307
14642
  flush(controller) {
14643
+ const providerMetadata = {
14644
+ openai: {
14645
+ responseId
14646
+ }
14647
+ };
14648
+ if (logprobs.length > 0) {
14649
+ providerMetadata.openai.logprobs = logprobs;
14650
+ }
14308
14651
  controller.enqueue({
14309
14652
  type: "finish",
14310
14653
  finishReason,
14311
14654
  usage,
14312
- providerMetadata: {
14313
- openai: {
14314
- responseId
14315
- }
14316
- }
14655
+ providerMetadata
14317
14656
  });
14318
14657
  }
14319
14658
  })
@@ -14332,7 +14671,8 @@ var usageSchema22 = z$1.object({
14332
14671
  var textDeltaChunkSchema2 = z$1.object({
14333
14672
  type: z$1.literal("response.output_text.delta"),
14334
14673
  item_id: z$1.string(),
14335
- delta: z$1.string()
14674
+ delta: z$1.string(),
14675
+ logprobs: LOGPROBS_SCHEMA.nullish()
14336
14676
  });
14337
14677
  var errorChunkSchema2 = z$1.object({
14338
14678
  type: z$1.literal("error"),
@@ -14379,12 +14719,32 @@ var responseOutputItemAddedSchema2 = z$1.object({
14379
14719
  z$1.object({
14380
14720
  type: z$1.literal("web_search_call"),
14381
14721
  id: z$1.string(),
14382
- status: z$1.string()
14722
+ status: z$1.string(),
14723
+ action: z$1.object({
14724
+ type: z$1.literal("search"),
14725
+ query: z$1.string().optional()
14726
+ }).nullish()
14383
14727
  }),
14384
14728
  z$1.object({
14385
14729
  type: z$1.literal("computer_call"),
14386
14730
  id: z$1.string(),
14387
14731
  status: z$1.string()
14732
+ }),
14733
+ z$1.object({
14734
+ type: z$1.literal("file_search_call"),
14735
+ id: z$1.string(),
14736
+ status: z$1.string(),
14737
+ queries: z$1.array(z$1.string()).nullish(),
14738
+ results: z$1.array(
14739
+ z$1.object({
14740
+ attributes: z$1.object({
14741
+ file_id: z$1.string(),
14742
+ filename: z$1.string(),
14743
+ score: z$1.number(),
14744
+ text: z$1.string()
14745
+ })
14746
+ })
14747
+ ).optional()
14388
14748
  })
14389
14749
  ])
14390
14750
  });
@@ -14412,12 +14772,32 @@ var responseOutputItemDoneSchema2 = z$1.object({
14412
14772
  z$1.object({
14413
14773
  type: z$1.literal("web_search_call"),
14414
14774
  id: z$1.string(),
14415
- status: z$1.literal("completed")
14775
+ status: z$1.literal("completed"),
14776
+ action: z$1.object({
14777
+ type: z$1.literal("search"),
14778
+ query: z$1.string().optional()
14779
+ }).nullish()
14416
14780
  }),
14417
14781
  z$1.object({
14418
14782
  type: z$1.literal("computer_call"),
14419
14783
  id: z$1.string(),
14420
14784
  status: z$1.literal("completed")
14785
+ }),
14786
+ z$1.object({
14787
+ type: z$1.literal("file_search_call"),
14788
+ id: z$1.string(),
14789
+ status: z$1.literal("completed"),
14790
+ queries: z$1.array(z$1.string()).nullish(),
14791
+ results: z$1.array(
14792
+ z$1.object({
14793
+ attributes: z$1.object({
14794
+ file_id: z$1.string(),
14795
+ filename: z$1.string(),
14796
+ score: z$1.number(),
14797
+ text: z$1.string()
14798
+ })
14799
+ })
14800
+ ).nullish()
14421
14801
  })
14422
14802
  ])
14423
14803
  });
@@ -14429,11 +14809,18 @@ var responseFunctionCallArgumentsDeltaSchema2 = z$1.object({
14429
14809
  });
14430
14810
  var responseAnnotationAddedSchema2 = z$1.object({
14431
14811
  type: z$1.literal("response.output_text.annotation.added"),
14432
- annotation: z$1.object({
14433
- type: z$1.literal("url_citation"),
14434
- url: z$1.string(),
14435
- title: z$1.string()
14436
- })
14812
+ annotation: z$1.discriminatedUnion("type", [
14813
+ z$1.object({
14814
+ type: z$1.literal("url_citation"),
14815
+ url: z$1.string(),
14816
+ title: z$1.string()
14817
+ }),
14818
+ z$1.object({
14819
+ type: z$1.literal("file_citation"),
14820
+ file_id: z$1.string(),
14821
+ quote: z$1.string()
14822
+ })
14823
+ ])
14437
14824
  });
14438
14825
  var responseReasoningSummaryPartAddedSchema = z$1.object({
14439
14826
  type: z$1.literal("response.reasoning_summary_part.added"),
@@ -14497,32 +14884,39 @@ function isErrorChunk2(chunk) {
14497
14884
  return chunk.type === "error";
14498
14885
  }
14499
14886
  function getResponsesModelConfig2(modelId) {
14500
- if (modelId.startsWith("o") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
14887
+ const supportsFlexProcessing2 = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
14888
+ const supportsPriorityProcessing2 = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
14889
+ const defaults = {
14890
+ requiredAutoTruncation: false,
14891
+ systemMessageMode: "system",
14892
+ supportsFlexProcessing: supportsFlexProcessing2,
14893
+ supportsPriorityProcessing: supportsPriorityProcessing2
14894
+ };
14895
+ if (modelId.startsWith("gpt-5-chat")) {
14896
+ return {
14897
+ ...defaults,
14898
+ isReasoningModel: false
14899
+ };
14900
+ }
14901
+ if (modelId.startsWith("o") || modelId.startsWith("gpt-5") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
14501
14902
  if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
14502
14903
  return {
14904
+ ...defaults,
14503
14905
  isReasoningModel: true,
14504
- systemMessageMode: "remove",
14505
- requiredAutoTruncation: false
14906
+ systemMessageMode: "remove"
14506
14907
  };
14507
14908
  }
14508
14909
  return {
14910
+ ...defaults,
14509
14911
  isReasoningModel: true,
14510
- systemMessageMode: "developer",
14511
- requiredAutoTruncation: false
14912
+ systemMessageMode: "developer"
14512
14913
  };
14513
14914
  }
14514
14915
  return {
14515
- isReasoningModel: false,
14516
- systemMessageMode: "system",
14517
- requiredAutoTruncation: false
14916
+ ...defaults,
14917
+ isReasoningModel: false
14518
14918
  };
14519
14919
  }
14520
- function supportsFlexProcessing2(modelId) {
14521
- return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
14522
- }
14523
- function supportsPriorityProcessing2(modelId) {
14524
- return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
14525
- }
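The standalone `supportsFlexProcessing2` / `supportsPriorityProcessing2` helpers are removed; tier support is now precomputed once into the shared `defaults` object, and `gpt-5-chat` is special-cased as the one gpt-5 variant treated as a non-reasoning model. Restated predicates, equivalent to the hunk above:

    const supportsFlex = (id) =>
      id.startsWith("o3") ||
      id.startsWith("o4-mini") ||
      (id.startsWith("gpt-5") && !id.startsWith("gpt-5-chat"));

    const supportsPriority = (id) =>
      id.startsWith("gpt-4") ||
      id.startsWith("gpt-5-mini") ||
      (id.startsWith("gpt-5") && !id.startsWith("gpt-5-nano") && !id.startsWith("gpt-5-chat")) ||
      id.startsWith("o3") ||
      id.startsWith("o4-mini");

    console.log(supportsFlex("gpt-5-mini"));       // true
    console.log(supportsFlex("gpt-5-chat-latest")); // false
    console.log(supportsPriority("gpt-5-nano"));    // false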
14526
14920
  var openaiResponsesProviderOptionsSchema2 = z$1.object({
14527
14921
  metadata: z$1.any().nullish(),
14528
14922
  parallelToolCalls: z$1.boolean().nullish(),
@@ -14534,7 +14928,29 @@ var openaiResponsesProviderOptionsSchema2 = z$1.object({
14534
14928
  instructions: z$1.string().nullish(),
14535
14929
  reasoningSummary: z$1.string().nullish(),
14536
14930
  serviceTier: z$1.enum(["auto", "flex", "priority"]).nullish(),
14537
- include: z$1.array(z$1.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish()
14931
+ include: z$1.array(
14932
+ z$1.enum([
14933
+ "reasoning.encrypted_content",
14934
+ "file_search_call.results",
14935
+ "message.output_text.logprobs"
14936
+ ])
14937
+ ).nullish(),
14938
+ textVerbosity: z$1.enum(["low", "medium", "high"]).nullish(),
14939
+ promptCacheKey: z$1.string().nullish(),
14940
+ safetyIdentifier: z$1.string().nullish(),
14941
+ /**
14942
+ * Return the log probabilities of the tokens.
14943
+ *
14944
+ * Setting to true will return the log probabilities of the tokens that
14945
+ * were generated.
14946
+ *
14947
+ * Setting to a number will return the log probabilities of the top n
14948
+ * tokens that were generated.
14949
+ *
14950
+ * @see https://platform.openai.com/docs/api-reference/responses/create
14951
+ * @see https://cookbook.openai.com/examples/using_logprobs
14952
+ */
14953
+ logprobs: z$1.union([z$1.boolean(), z$1.number().min(1).max(TOP_LOGPROBS_MAX)]).optional()
14538
14954
  });
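Sketch of the new Responses provider options in use. The option keys come from the schema above; passing them through an AI SDK-style `providerOptions` bag is an assumption about the calling convention, not something this diff shows:

    const providerOptions = {
      openai: {
        textVerbosity: "low",
        promptCacheKey: "support-bot/v2",
        safetyIdentifier: "end-user-1234",
        logprobs: 5, // or true; numeric values are bounded by TOP_LOGPROBS_MAX
        include: ["message.output_text.logprobs", "reasoning.encrypted_content"],
      },
    };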
14539
14955
  var OpenAIProviderOptionsSchema2 = z$1.object({
14540
14956
  instructions: z$1.string().nullish(),
@@ -14639,6 +15055,206 @@ var OpenAISpeechModel2 = class {
14639
15055
  };
14640
15056
  }
14641
15057
  };
15058
+ var openAITranscriptionProviderOptions = z$1.object({
15059
+ /**
15060
+ * Additional information to include in the transcription response.
15061
+ */
15062
+ include: z$1.array(z$1.string()).optional(),
15063
+ /**
15064
+ * The language of the input audio in ISO-639-1 format.
15065
+ */
15066
+ language: z$1.string().optional(),
15067
+ /**
15068
+ * An optional text to guide the model's style or continue a previous audio segment.
15069
+ */
15070
+ prompt: z$1.string().optional(),
15071
+ /**
15072
+ * The sampling temperature, between 0 and 1.
15073
+ * @default 0
15074
+ */
15075
+ temperature: z$1.number().min(0).max(1).default(0).optional(),
15076
+ /**
15077
+ * The timestamp granularities to populate for this transcription.
15078
+ * @default ['segment']
15079
+ */
15080
+ timestampGranularities: z$1.array(z$1.enum(["word", "segment"])).default(["segment"]).optional()
15081
+ });
15082
+ var languageMap2 = {
15083
+ afrikaans: "af",
15084
+ arabic: "ar",
15085
+ armenian: "hy",
15086
+ azerbaijani: "az",
15087
+ belarusian: "be",
15088
+ bosnian: "bs",
15089
+ bulgarian: "bg",
15090
+ catalan: "ca",
15091
+ chinese: "zh",
15092
+ croatian: "hr",
15093
+ czech: "cs",
15094
+ danish: "da",
15095
+ dutch: "nl",
15096
+ english: "en",
15097
+ estonian: "et",
15098
+ finnish: "fi",
15099
+ french: "fr",
15100
+ galician: "gl",
15101
+ german: "de",
15102
+ greek: "el",
15103
+ hebrew: "he",
15104
+ hindi: "hi",
15105
+ hungarian: "hu",
15106
+ icelandic: "is",
15107
+ indonesian: "id",
15108
+ italian: "it",
15109
+ japanese: "ja",
15110
+ kannada: "kn",
15111
+ kazakh: "kk",
15112
+ korean: "ko",
15113
+ latvian: "lv",
15114
+ lithuanian: "lt",
15115
+ macedonian: "mk",
15116
+ malay: "ms",
15117
+ marathi: "mr",
15118
+ maori: "mi",
15119
+ nepali: "ne",
15120
+ norwegian: "no",
15121
+ persian: "fa",
15122
+ polish: "pl",
15123
+ portuguese: "pt",
15124
+ romanian: "ro",
15125
+ russian: "ru",
15126
+ serbian: "sr",
15127
+ slovak: "sk",
15128
+ slovenian: "sl",
15129
+ spanish: "es",
15130
+ swahili: "sw",
15131
+ swedish: "sv",
15132
+ tagalog: "tl",
15133
+ tamil: "ta",
15134
+ thai: "th",
15135
+ turkish: "tr",
15136
+ ukrainian: "uk",
15137
+ urdu: "ur",
15138
+ vietnamese: "vi",
15139
+ welsh: "cy"
15140
+ };
15141
+ var OpenAITranscriptionModel2 = class {
15142
+ constructor(modelId, config) {
15143
+ this.modelId = modelId;
15144
+ this.config = config;
15145
+ this.specificationVersion = "v2";
15146
+ }
15147
+ get provider() {
15148
+ return this.config.provider;
15149
+ }
15150
+ async getArgs({
15151
+ audio,
15152
+ mediaType,
15153
+ providerOptions
15154
+ }) {
15155
+ const warnings = [];
15156
+ const openAIOptions = await parseProviderOptions2({
15157
+ provider: "openai",
15158
+ providerOptions,
15159
+ schema: openAITranscriptionProviderOptions
15160
+ });
15161
+ const formData = new FormData();
15162
+ const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([convertBase64ToUint8Array2(audio)]);
15163
+ formData.append("model", this.modelId);
15164
+ formData.append("file", new File([blob], "audio", { type: mediaType }));
15165
+ if (openAIOptions) {
15166
+ const transcriptionModelOptions = {
15167
+ include: openAIOptions.include,
15168
+ language: openAIOptions.language,
15169
+ prompt: openAIOptions.prompt,
15170
+ response_format: "verbose_json",
15171
+ // always use verbose_json to get segments
15172
+ temperature: openAIOptions.temperature,
15173
+ timestamp_granularities: openAIOptions.timestampGranularities
15174
+ };
15175
+ for (const [key, value] of Object.entries(transcriptionModelOptions)) {
15176
+ if (value != null) {
15177
+ formData.append(key, String(value));
15178
+ }
15179
+ }
15180
+ }
15181
+ return {
15182
+ formData,
15183
+ warnings
15184
+ };
15185
+ }
15186
+ async doGenerate(options) {
15187
+ var _a16, _b, _c, _d, _e, _f, _g, _h;
15188
+ const currentDate = (_c = (_b = (_a16 = this.config._internal) == null ? void 0 : _a16.currentDate) == null ? void 0 : _b.call(_a16)) != null ? _c : /* @__PURE__ */ new Date();
15189
+ const { formData, warnings } = await this.getArgs(options);
15190
+ const {
15191
+ value: response,
15192
+ responseHeaders,
15193
+ rawValue: rawResponse
15194
+ } = await postFormDataToApi2({
15195
+ url: this.config.url({
15196
+ path: "/audio/transcriptions",
15197
+ modelId: this.modelId
15198
+ }),
15199
+ headers: combineHeaders2(this.config.headers(), options.headers),
15200
+ formData,
15201
+ failedResponseHandler: openaiFailedResponseHandler2,
15202
+ successfulResponseHandler: createJsonResponseHandler2(
15203
+ openaiTranscriptionResponseSchema2
15204
+ ),
15205
+ abortSignal: options.abortSignal,
15206
+ fetch: this.config.fetch
15207
+ });
15208
+ const language = response.language != null && response.language in languageMap2 ? languageMap2[response.language] : void 0;
15209
+ return {
15210
+ text: response.text,
15211
+ segments: (_g = (_f = (_d = response.segments) == null ? void 0 : _d.map((segment) => ({
15212
+ text: segment.text,
15213
+ startSecond: segment.start,
15214
+ endSecond: segment.end
15215
+ }))) != null ? _f : (_e = response.words) == null ? void 0 : _e.map((word) => ({
15216
+ text: word.word,
15217
+ startSecond: word.start,
15218
+ endSecond: word.end
15219
+ }))) != null ? _g : [],
15220
+ language,
15221
+ durationInSeconds: (_h = response.duration) != null ? _h : void 0,
15222
+ warnings,
15223
+ response: {
15224
+ timestamp: currentDate,
15225
+ modelId: this.modelId,
15226
+ headers: responseHeaders,
15227
+ body: rawResponse
15228
+ }
15229
+ };
15230
+ }
15231
+ };
15232
+ var openaiTranscriptionResponseSchema2 = z$1.object({
15233
+ text: z$1.string(),
15234
+ language: z$1.string().nullish(),
15235
+ duration: z$1.number().nullish(),
15236
+ words: z$1.array(
15237
+ z$1.object({
15238
+ word: z$1.string(),
15239
+ start: z$1.number(),
15240
+ end: z$1.number()
15241
+ })
15242
+ ).nullish(),
15243
+ segments: z$1.array(
15244
+ z$1.object({
15245
+ id: z$1.number(),
15246
+ seek: z$1.number(),
15247
+ start: z$1.number(),
15248
+ end: z$1.number(),
15249
+ text: z$1.string(),
15250
+ tokens: z$1.array(z$1.number()),
15251
+ temperature: z$1.number(),
15252
+ avg_logprob: z$1.number(),
15253
+ compression_ratio: z$1.number(),
15254
+ no_speech_prob: z$1.number()
15255
+ })
15256
+ ).nullish()
15257
+ });
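Sketch of the segment-mapping precedence used by `doGenerate` above, restated without the minifier's nested conditionals: `verbose_json` segments win, word-level timestamps are the fallback, and an empty array is the default:

    function mapSegments(response) {
      return (
        response.segments?.map((s) => ({ text: s.text, startSecond: s.start, endSecond: s.end })) ??
        response.words?.map((w) => ({ text: w.word, startSecond: w.start, endSecond: w.end })) ??
        []
      );
    }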
14642
15258
  function createOpenAI2(options = {}) {
14643
15259
  var _a16, _b;
14644
15260
  const baseURL = (_a16 = withoutTrailingSlash2(options.baseURL)) != null ? _a16 : "https://api.openai.com/v1";
@@ -14702,7 +15318,8 @@ function createOpenAI2(options = {}) {
14702
15318
  provider: `${providerName}.responses`,
14703
15319
  url: ({ path }) => `${baseURL}${path}`,
14704
15320
  headers: getHeaders,
14705
- fetch: options.fetch
15321
+ fetch: options.fetch,
15322
+ fileIdPrefixes: ["file-"]
14706
15323
  });
14707
15324
  };
14708
15325
  const provider = function(modelId) {
@@ -14869,7 +15486,7 @@ var defaultOpenAICompatibleErrorStructure = {
14869
15486
  errorSchema: openaiCompatibleErrorDataSchema,
14870
15487
  errorToMessage: (data) => data.error.message
14871
15488
  };
14872
- function prepareTools9({
15489
+ function prepareTools8({
14873
15490
  mode,
14874
15491
  structuredOutputs
14875
15492
  }) {
@@ -15005,7 +15622,7 @@ var OpenAICompatibleChatLanguageModel = class {
15005
15622
  };
15006
15623
  switch (type) {
15007
15624
  case "regular": {
15008
- const { tools, tool_choice, toolWarnings } = prepareTools9({
15625
+ const { tools, tool_choice, toolWarnings } = prepareTools8({
15009
15626
  mode,
15010
15627
  structuredOutputs: this.supportsStructuredOutputs
15011
15628
  });
@@ -15692,6 +16309,7 @@ z$1.object({
15692
16309
  role: z$1.literal("assistant").nullish(),
15693
16310
  content: z$1.string().nullish(),
15694
16311
  reasoning_content: z$1.string().nullish(),
16312
+ reasoning: z$1.string().nullish(),
15695
16313
  tool_calls: z$1.array(
15696
16314
  z$1.object({
15697
16315
  id: z$1.string().nullish(),
@@ -15760,7 +16378,8 @@ z$1.object({
15760
16378
  });
15761
16379
  z$1.object({
15762
16380
  data: z$1.array(z$1.object({ embedding: z$1.array(z$1.number()) })),
15763
- usage: z$1.object({ prompt_tokens: z$1.number() }).nullish()
16381
+ usage: z$1.object({ prompt_tokens: z$1.number() }).nullish(),
16382
+ providerMetadata: z$1.record(z$1.string(), z$1.record(z$1.string(), z$1.any())).optional()
15764
16383
  });
15765
16384
  var OpenAICompatibleImageModel2 = class {
15766
16385
  constructor(modelId, config) {
@@ -16038,7 +16657,7 @@ var xaiFailedResponseHandler = createJsonErrorResponseHandler2({
16038
16657
  errorSchema: xaiErrorDataSchema,
16039
16658
  errorToMessage: (data) => data.error.message
16040
16659
  });
16041
- function prepareTools10({
16660
+ function prepareTools9({
16042
16661
  tools,
16043
16662
  toolChoice
16044
16663
  }) {
@@ -16160,7 +16779,7 @@ var XaiChatLanguageModel = class {
16160
16779
  tools: xaiTools,
16161
16780
  toolChoice: xaiToolChoice,
16162
16781
  toolWarnings
16163
- } = prepareTools10({
16782
+ } = prepareTools9({
16164
16783
  tools,
16165
16784
  toolChoice
16166
16785
  });
@@ -16968,5 +17587,5 @@ async function updateAgentModelHandler({
16968
17587
  }
16969
17588
 
16970
17589
  export { agents_exports, generateHandler, generateVNextHandler, getAgentByIdHandler, getAgentsHandler, getEvalsByAgentIdHandler, getLiveEvalsByAgentIdHandler, getSerializedAgentTools, streamGenerateHandler, streamVNextGenerateHandler, streamVNextUIMessageHandler, updateAgentModelHandler };
16971
- //# sourceMappingURL=chunk-PREC2QAP.js.map
16972
- //# sourceMappingURL=chunk-PREC2QAP.js.map
17590
+ //# sourceMappingURL=chunk-22OZ4DMU.js.map
17591
+ //# sourceMappingURL=chunk-22OZ4DMU.js.map