@ai-sdk/openai 2.0.29 → 2.0.31

This diff shows the content changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
@@ -2138,35 +2138,35 @@ var OpenAISpeechModel = class {
2138
2138
  // src/responses/openai-responses-language-model.ts
2139
2139
  var import_provider8 = require("@ai-sdk/provider");
2140
2140
  var import_provider_utils15 = require("@ai-sdk/provider-utils");
2141
- var import_v417 = require("zod/v4");
2141
+ var import_v418 = require("zod/v4");
2142
2142
 
2143
- // src/responses/convert-to-openai-responses-messages.ts
2143
+ // src/responses/convert-to-openai-responses-input.ts
2144
2144
  var import_provider6 = require("@ai-sdk/provider");
2145
2145
  var import_provider_utils11 = require("@ai-sdk/provider-utils");
2146
2146
  var import_v414 = require("zod/v4");
2147
- var import_provider_utils12 = require("@ai-sdk/provider-utils");
2148
2147
  function isFileId(data, prefixes) {
2149
2148
  if (!prefixes) return false;
2150
2149
  return prefixes.some((prefix) => data.startsWith(prefix));
2151
2150
  }
2152
- async function convertToOpenAIResponsesMessages({
2151
+ async function convertToOpenAIResponsesInput({
2153
2152
  prompt,
2154
2153
  systemMessageMode,
2155
- fileIdPrefixes
2154
+ fileIdPrefixes,
2155
+ store
2156
2156
  }) {
2157
2157
  var _a, _b, _c, _d, _e, _f;
2158
- const messages = [];
2158
+ const input = [];
2159
2159
  const warnings = [];
2160
2160
  for (const { role, content } of prompt) {
2161
2161
  switch (role) {
2162
2162
  case "system": {
2163
2163
  switch (systemMessageMode) {
2164
2164
  case "system": {
2165
- messages.push({ role: "system", content });
2165
+ input.push({ role: "system", content });
2166
2166
  break;
2167
2167
  }
2168
2168
  case "developer": {
2169
- messages.push({ role: "developer", content });
2169
+ input.push({ role: "developer", content });
2170
2170
  break;
2171
2171
  }
2172
2172
  case "remove": {
@@ -2186,7 +2186,7 @@ async function convertToOpenAIResponsesMessages({
2186
2186
  break;
2187
2187
  }
2188
2188
  case "user": {
2189
- messages.push({
2189
+ input.push({
2190
2190
  role: "user",
2191
2191
  content: content.map((part, index) => {
2192
2192
  var _a2, _b2, _c2;
@@ -2200,7 +2200,7 @@ async function convertToOpenAIResponsesMessages({
2200
2200
  return {
2201
2201
  type: "input_image",
2202
2202
  ...part.data instanceof URL ? { image_url: part.data.toString() } : typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
2203
- image_url: `data:${mediaType};base64,${(0, import_provider_utils12.convertToBase64)(part.data)}`
2203
+ image_url: `data:${mediaType};base64,${(0, import_provider_utils11.convertToBase64)(part.data)}`
2204
2204
  },
2205
2205
  detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
2206
2206
  };
@@ -2215,7 +2215,7 @@ async function convertToOpenAIResponsesMessages({
2215
2215
  type: "input_file",
2216
2216
  ...typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
2217
2217
  filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
2218
- file_data: `data:application/pdf;base64,${(0, import_provider_utils12.convertToBase64)(part.data)}`
2218
+ file_data: `data:application/pdf;base64,${(0, import_provider_utils11.convertToBase64)(part.data)}`
2219
2219
  }
2220
2220
  };
2221
2221
  } else {
@@ -2231,10 +2231,11 @@ async function convertToOpenAIResponsesMessages({
2231
2231
  }
2232
2232
  case "assistant": {
2233
2233
  const reasoningMessages = {};
2234
+ const toolCallParts = {};
2234
2235
  for (const part of content) {
2235
2236
  switch (part.type) {
2236
2237
  case "text": {
2237
- messages.push({
2238
+ input.push({
2238
2239
  role: "assistant",
2239
2240
  content: [{ type: "output_text", text: part.text }],
2240
2241
  id: (_c = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId) != null ? _c : void 0
@@ -2242,10 +2243,11 @@ async function convertToOpenAIResponsesMessages({
2242
2243
  break;
2243
2244
  }
2244
2245
  case "tool-call": {
2246
+ toolCallParts[part.toolCallId] = part;
2245
2247
  if (part.providerExecuted) {
2246
2248
  break;
2247
2249
  }
2248
- messages.push({
2250
+ input.push({
2249
2251
  type: "function_call",
2250
2252
  call_id: part.toolCallId,
2251
2253
  name: part.toolName,
@@ -2255,10 +2257,14 @@ async function convertToOpenAIResponsesMessages({
2255
2257
  break;
2256
2258
  }
2257
2259
  case "tool-result": {
2258
- warnings.push({
2259
- type: "other",
2260
- message: `tool result parts in assistant messages are not supported for OpenAI responses`
2261
- });
2260
+ if (store) {
2261
+ input.push({ type: "item_reference", id: part.toolCallId });
2262
+ } else {
2263
+ warnings.push({
2264
+ type: "other",
2265
+ message: `Results for OpenAI tool ${part.toolName} are not sent to the API when store is false`
2266
+ });
2267
+ }
2262
2268
  break;
2263
2269
  }
2264
2270
  case "reasoning": {
@@ -2286,7 +2292,7 @@ async function convertToOpenAIResponsesMessages({
2286
2292
  encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
2287
2293
  summary: summaryParts
2288
2294
  };
2289
- messages.push(reasoningMessages[reasoningId]);
2295
+ input.push(reasoningMessages[reasoningId]);
2290
2296
  } else {
2291
2297
  existingReasoningMessage.summary.push(...summaryParts);
2292
2298
  }
@@ -2317,7 +2323,7 @@ async function convertToOpenAIResponsesMessages({
2317
2323
  contentValue = JSON.stringify(output.value);
2318
2324
  break;
2319
2325
  }
2320
- messages.push({
2326
+ input.push({
2321
2327
  type: "function_call_output",
2322
2328
  call_id: part.toolCallId,
2323
2329
  output: contentValue
@@ -2331,7 +2337,7 @@ async function convertToOpenAIResponsesMessages({
2331
2337
  }
2332
2338
  }
2333
2339
  }
2334
- return { messages, warnings };
2340
+ return { input, warnings };
2335
2341
  }
2336
2342
  var openaiResponsesReasoningProviderOptionsSchema = import_v414.z.object({
2337
2343
  itemId: import_v414.z.string().nullish(),
@@ -2360,7 +2366,7 @@ function mapOpenAIResponseFinishReason({
2360
2366
  var import_provider7 = require("@ai-sdk/provider");
2361
2367
 
2362
2368
  // src/tool/code-interpreter.ts
2363
- var import_provider_utils13 = require("@ai-sdk/provider-utils");
2369
+ var import_provider_utils12 = require("@ai-sdk/provider-utils");
2364
2370
  var import_v415 = require("zod/v4");
2365
2371
  var codeInterpreterInputSchema = import_v415.z.object({
2366
2372
  code: import_v415.z.string().nullish(),
@@ -2382,7 +2388,7 @@ var codeInterpreterArgsSchema = import_v415.z.object({
2382
2388
  })
2383
2389
  ]).optional()
2384
2390
  });
2385
- var codeInterpreterToolFactory = (0, import_provider_utils13.createProviderDefinedToolFactoryWithOutputSchema)({
2391
+ var codeInterpreterToolFactory = (0, import_provider_utils12.createProviderDefinedToolFactoryWithOutputSchema)({
2386
2392
  id: "openai.code_interpreter",
2387
2393
  name: "code_interpreter",
2388
2394
  inputSchema: codeInterpreterInputSchema,
@@ -2390,7 +2396,7 @@ var codeInterpreterToolFactory = (0, import_provider_utils13.createProviderDefin
2390
2396
  });
2391
2397
 
2392
2398
  // src/tool/web-search.ts
2393
- var import_provider_utils14 = require("@ai-sdk/provider-utils");
2399
+ var import_provider_utils13 = require("@ai-sdk/provider-utils");
2394
2400
  var import_v416 = require("zod/v4");
2395
2401
  var webSearchArgsSchema = import_v416.z.object({
2396
2402
  filters: import_v416.z.object({
@@ -2405,7 +2411,7 @@ var webSearchArgsSchema = import_v416.z.object({
2405
2411
  timezone: import_v416.z.string().optional()
2406
2412
  }).optional()
2407
2413
  });
2408
- var webSearchToolFactory = (0, import_provider_utils14.createProviderDefinedToolFactory)({
2414
+ var webSearchToolFactory = (0, import_provider_utils13.createProviderDefinedToolFactory)({
2409
2415
  id: "openai.web_search",
2410
2416
  name: "web_search",
2411
2417
  inputSchema: import_v416.z.object({
@@ -2427,6 +2433,33 @@ var webSearchToolFactory = (0, import_provider_utils14.createProviderDefinedTool
2427
2433
  })
2428
2434
  });
2429
2435
 
2436
+ // src/tool/image-generation.ts
2437
+ var import_provider_utils14 = require("@ai-sdk/provider-utils");
2438
+ var import_v417 = require("zod/v4");
2439
+ var imageGenerationArgsSchema = import_v417.z.object({
2440
+ background: import_v417.z.enum(["auto", "opaque", "transparent"]).optional(),
2441
+ inputFidelity: import_v417.z.enum(["low", "high"]).optional(),
2442
+ inputImageMask: import_v417.z.object({
2443
+ fileId: import_v417.z.string().optional(),
2444
+ imageUrl: import_v417.z.string().optional()
2445
+ }).optional(),
2446
+ model: import_v417.z.string().optional(),
2447
+ moderation: import_v417.z.enum(["auto"]).optional(),
2448
+ outputCompression: import_v417.z.number().int().min(0).max(100).optional(),
2449
+ outputFormat: import_v417.z.enum(["png", "jpeg", "webp"]).optional(),
2450
+ quality: import_v417.z.enum(["auto", "low", "medium", "high"]).optional(),
2451
+ size: import_v417.z.enum(["1024x1024", "1024x1536", "1536x1024", "auto"]).optional()
2452
+ }).strict();
2453
+ var imageGenerationOutputSchema = import_v417.z.object({
2454
+ result: import_v417.z.string()
2455
+ });
2456
+ var imageGenerationToolFactory = (0, import_provider_utils14.createProviderDefinedToolFactoryWithOutputSchema)({
2457
+ id: "openai.image_generation",
2458
+ name: "image_generation",
2459
+ inputSchema: import_v417.z.object({}),
2460
+ outputSchema: imageGenerationOutputSchema
2461
+ });
2462
+
2430
2463
  // src/responses/openai-responses-prepare-tools.ts
2431
2464
  function prepareResponsesTools({
2432
2465
  tools,
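The new src/tool/image-generation.ts module defines a provider-executed image_generation tool: its arguments (background, inputFidelity, inputImageMask, model, moderation, outputCompression, outputFormat, quality, size) are validated by imageGenerationArgsSchema, and its output is { result: string } holding the generated image. A usage sketch, assuming the factory is re-exported as openai.tools.imageGeneration (the re-export is not part of this diff):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai.responses('gpt-4.1'),
  prompt: 'Draw a watercolor lighthouse at dusk.',
  tools: {
    // Assumed export name; argument names follow imageGenerationArgsSchema above.
    image_generation: openai.tools.imageGeneration({
      size: '1024x1024',
      quality: 'high',
      background: 'transparent',
      outputFormat: 'png',
    }),
  },
});
```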
@@ -2490,8 +2523,23 @@ function prepareResponsesTools({
2490
2523
  });
2491
2524
  break;
2492
2525
  }
2493
- default: {
2494
- toolWarnings.push({ type: "unsupported-tool", tool });
2526
+ case "openai.image_generation": {
2527
+ const args = imageGenerationArgsSchema.parse(tool.args);
2528
+ openaiTools.push({
2529
+ type: "image_generation",
2530
+ background: args.background,
2531
+ input_fidelity: args.inputFidelity,
2532
+ input_image_mask: args.inputImageMask ? {
2533
+ file_id: args.inputImageMask.fileId,
2534
+ image_url: args.inputImageMask.imageUrl
2535
+ } : void 0,
2536
+ model: args.model,
2537
+ size: args.size,
2538
+ quality: args.quality,
2539
+ moderation: args.moderation,
2540
+ output_format: args.outputFormat,
2541
+ output_compression: args.outputCompression
2542
+ });
2495
2543
  break;
2496
2544
  }
2497
2545
  }
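prepareResponsesTools now recognizes openai.image_generation instead of hitting the unsupported-tool default branch: it parses the camelCase args and maps them onto the snake_case tool entry of the Responses API request. For the example arguments above, the entry added to the request body looks roughly like this (unset options stay undefined and drop out on serialization):

```ts
// Derived from the mapping in the case "openai.image_generation" branch above.
const imageGenerationToolEntry = {
  type: 'image_generation',
  background: 'transparent',
  size: '1024x1024',
  quality: 'high',
  output_format: 'png',
  // input_fidelity, input_image_mask, model, moderation, output_compression: undefined
};
```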
@@ -2514,7 +2562,7 @@ function prepareResponsesTools({
2514
2562
  case "tool":
2515
2563
  return {
2516
2564
  tools: openaiTools,
2517
- toolChoice: toolChoice.toolName === "code_interpreter" || toolChoice.toolName === "file_search" || toolChoice.toolName === "web_search_preview" || toolChoice.toolName === "web_search" ? { type: toolChoice.toolName } : { type: "function", name: toolChoice.toolName },
2565
+ toolChoice: toolChoice.toolName === "code_interpreter" || toolChoice.toolName === "file_search" || toolChoice.toolName === "image_generation" || toolChoice.toolName === "web_search_preview" || toolChoice.toolName === "web_search" ? { type: toolChoice.toolName } : { type: "function", name: toolChoice.toolName },
2518
2566
  toolWarnings
2519
2567
  };
2520
2568
  default: {
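image_generation also joins the built-in tools that can be forced through toolChoice: when the selected tool name is one of the built-ins, the provider sends { type: <name> } rather than a function tool choice. Sketch (tool factory export assumed as above):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

await generateText({
  model: openai.responses('gpt-4.1'),
  prompt: 'Generate a product mockup image.',
  tools: { image_generation: openai.tools.imageGeneration({}) }, // assumed export, see above
  toolChoice: { type: 'tool', toolName: 'image_generation' },    // becomes { type: 'image_generation' }
});
```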
@@ -2527,47 +2575,52 @@ function prepareResponsesTools({
2527
2575
  }
2528
2576
 
2529
2577
  // src/responses/openai-responses-language-model.ts
2530
- var webSearchCallItem = import_v417.z.object({
2531
- type: import_v417.z.literal("web_search_call"),
2532
- id: import_v417.z.string(),
2533
- status: import_v417.z.string(),
2534
- action: import_v417.z.discriminatedUnion("type", [
2535
- import_v417.z.object({
2536
- type: import_v417.z.literal("search"),
2537
- query: import_v417.z.string().nullish()
2578
+ var webSearchCallItem = import_v418.z.object({
2579
+ type: import_v418.z.literal("web_search_call"),
2580
+ id: import_v418.z.string(),
2581
+ status: import_v418.z.string(),
2582
+ action: import_v418.z.discriminatedUnion("type", [
2583
+ import_v418.z.object({
2584
+ type: import_v418.z.literal("search"),
2585
+ query: import_v418.z.string().nullish()
2538
2586
  }),
2539
- import_v417.z.object({
2540
- type: import_v417.z.literal("open_page"),
2541
- url: import_v417.z.string()
2587
+ import_v418.z.object({
2588
+ type: import_v418.z.literal("open_page"),
2589
+ url: import_v418.z.string()
2542
2590
  }),
2543
- import_v417.z.object({
2544
- type: import_v417.z.literal("find"),
2545
- url: import_v417.z.string(),
2546
- pattern: import_v417.z.string()
2591
+ import_v418.z.object({
2592
+ type: import_v418.z.literal("find"),
2593
+ url: import_v418.z.string(),
2594
+ pattern: import_v418.z.string()
2547
2595
  })
2548
2596
  ]).nullish()
2549
2597
  });
2550
- var codeInterpreterCallItem = import_v417.z.object({
2551
- type: import_v417.z.literal("code_interpreter_call"),
2552
- id: import_v417.z.string(),
2553
- code: import_v417.z.string().nullable(),
2554
- container_id: import_v417.z.string(),
2555
- outputs: import_v417.z.array(
2556
- import_v417.z.discriminatedUnion("type", [
2557
- import_v417.z.object({ type: import_v417.z.literal("logs"), logs: import_v417.z.string() }),
2558
- import_v417.z.object({ type: import_v417.z.literal("image"), url: import_v417.z.string() })
2598
+ var codeInterpreterCallItem = import_v418.z.object({
2599
+ type: import_v418.z.literal("code_interpreter_call"),
2600
+ id: import_v418.z.string(),
2601
+ code: import_v418.z.string().nullable(),
2602
+ container_id: import_v418.z.string(),
2603
+ outputs: import_v418.z.array(
2604
+ import_v418.z.discriminatedUnion("type", [
2605
+ import_v418.z.object({ type: import_v418.z.literal("logs"), logs: import_v418.z.string() }),
2606
+ import_v418.z.object({ type: import_v418.z.literal("image"), url: import_v418.z.string() })
2559
2607
  ])
2560
2608
  ).nullable()
2561
2609
  });
2610
+ var imageGenerationCallItem = import_v418.z.object({
2611
+ type: import_v418.z.literal("image_generation_call"),
2612
+ id: import_v418.z.string(),
2613
+ result: import_v418.z.string()
2614
+ });
2562
2615
  var TOP_LOGPROBS_MAX = 20;
2563
- var LOGPROBS_SCHEMA = import_v417.z.array(
2564
- import_v417.z.object({
2565
- token: import_v417.z.string(),
2566
- logprob: import_v417.z.number(),
2567
- top_logprobs: import_v417.z.array(
2568
- import_v417.z.object({
2569
- token: import_v417.z.string(),
2570
- logprob: import_v417.z.number()
2616
+ var LOGPROBS_SCHEMA = import_v418.z.array(
2617
+ import_v418.z.object({
2618
+ token: import_v418.z.string(),
2619
+ logprob: import_v418.z.number(),
2620
+ top_logprobs: import_v418.z.array(
2621
+ import_v418.z.object({
2622
+ token: import_v418.z.string(),
2623
+ logprob: import_v418.z.number()
2571
2624
  })
2572
2625
  )
2573
2626
  })
@@ -2600,7 +2653,7 @@ var OpenAIResponsesLanguageModel = class {
2600
2653
  toolChoice,
2601
2654
  responseFormat
2602
2655
  }) {
2603
- var _a, _b, _c, _d;
2656
+ var _a, _b, _c, _d, _e;
2604
2657
  const warnings = [];
2605
2658
  const modelConfig = getResponsesModelConfig(this.modelId);
2606
2659
  if (topK != null) {
@@ -2624,32 +2677,33 @@ var OpenAIResponsesLanguageModel = class {
2624
2677
  if (stopSequences != null) {
2625
2678
  warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
2626
2679
  }
2627
- const { messages, warnings: messageWarnings } = await convertToOpenAIResponsesMessages({
2628
- prompt,
2629
- systemMessageMode: modelConfig.systemMessageMode,
2630
- fileIdPrefixes: this.config.fileIdPrefixes
2631
- });
2632
- warnings.push(...messageWarnings);
2633
2680
  const openaiOptions = await (0, import_provider_utils15.parseProviderOptions)({
2634
2681
  provider: "openai",
2635
2682
  providerOptions,
2636
2683
  schema: openaiResponsesProviderOptionsSchema
2637
2684
  });
2638
- const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
2685
+ const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
2686
+ prompt,
2687
+ systemMessageMode: modelConfig.systemMessageMode,
2688
+ fileIdPrefixes: this.config.fileIdPrefixes,
2689
+ store: (_a = openaiOptions == null ? void 0 : openaiOptions.store) != null ? _a : true
2690
+ });
2691
+ warnings.push(...inputWarnings);
2692
+ const strictJsonSchema = (_b = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _b : false;
2639
2693
  let include = openaiOptions == null ? void 0 : openaiOptions.include;
2640
2694
  const topLogprobs = typeof (openaiOptions == null ? void 0 : openaiOptions.logprobs) === "number" ? openaiOptions == null ? void 0 : openaiOptions.logprobs : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? TOP_LOGPROBS_MAX : void 0;
2641
2695
  include = topLogprobs ? Array.isArray(include) ? [...include, "message.output_text.logprobs"] : ["message.output_text.logprobs"] : include;
2642
- const webSearchToolName = (_b = tools == null ? void 0 : tools.find(
2696
+ const webSearchToolName = (_c = tools == null ? void 0 : tools.find(
2643
2697
  (tool) => tool.type === "provider-defined" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview")
2644
- )) == null ? void 0 : _b.name;
2698
+ )) == null ? void 0 : _c.name;
2645
2699
  include = webSearchToolName ? Array.isArray(include) ? [...include, "web_search_call.action.sources"] : ["web_search_call.action.sources"] : include;
2646
- const codeInterpreterToolName = (_c = tools == null ? void 0 : tools.find(
2700
+ const codeInterpreterToolName = (_d = tools == null ? void 0 : tools.find(
2647
2701
  (tool) => tool.type === "provider-defined" && tool.id === "openai.code_interpreter"
2648
- )) == null ? void 0 : _c.name;
2702
+ )) == null ? void 0 : _d.name;
2649
2703
  include = codeInterpreterToolName ? Array.isArray(include) ? [...include, "code_interpreter_call.outputs"] : ["code_interpreter_call.outputs"] : include;
2650
2704
  const baseArgs = {
2651
2705
  model: this.modelId,
2652
- input: messages,
2706
+ input,
2653
2707
  temperature,
2654
2708
  top_p: topP,
2655
2709
  max_output_tokens: maxOutputTokens,
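The converter call now runs after parseProviderOptions so the store provider option (defaulting to true) can be passed to convertToOpenAIResponsesInput, and the request input comes straight from its return value. Opting out of storage looks like this; with store set to false, provider-executed tool results from earlier assistant turns are dropped with the warning shown earlier:

```ts
import { generateText, type ModelMessage } from 'ai';
import { openai } from '@ai-sdk/openai';

declare const messages: ModelMessage[]; // hypothetical multi-turn conversation

await generateText({
  model: openai.responses('gpt-4.1'),
  messages,
  providerOptions: {
    openai: { store: false }, // default is true
  },
});
```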
@@ -2659,7 +2713,7 @@ var OpenAIResponsesLanguageModel = class {
2659
2713
  format: responseFormat.schema != null ? {
2660
2714
  type: "json_schema",
2661
2715
  strict: strictJsonSchema,
2662
- name: (_d = responseFormat.name) != null ? _d : "response",
2716
+ name: (_e = responseFormat.name) != null ? _e : "response",
2663
2717
  description: responseFormat.description,
2664
2718
  schema: responseFormat.schema
2665
2719
  } : { type: "json_object" }
@@ -2670,6 +2724,7 @@ var OpenAIResponsesLanguageModel = class {
2670
2724
  }
2671
2725
  },
2672
2726
  // provider options:
2727
+ max_tool_calls: openaiOptions == null ? void 0 : openaiOptions.maxToolCalls,
2673
2728
  metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
2674
2729
  parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
2675
2730
  previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
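A new max_tool_calls request field is wired to the maxToolCalls provider option (documented in the schema change near the end of this diff); it caps the total number of built-in tool calls across a response. Setting it from application code:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

await generateText({
  model: openai.responses('gpt-4.1'),
  prompt: 'Research the three most recent Node.js LTS releases.',
  tools: { web_search: openai.tools.webSearch({}) }, // built-in tool, as defined earlier in this bundle
  providerOptions: {
    openai: { maxToolCalls: 2 }, // sent as max_tool_calls
  },
});
```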
@@ -2785,45 +2840,45 @@ var OpenAIResponsesLanguageModel = class {
2785
2840
  body,
2786
2841
  failedResponseHandler: openaiFailedResponseHandler,
2787
2842
  successfulResponseHandler: (0, import_provider_utils15.createJsonResponseHandler)(
2788
- import_v417.z.object({
2789
- id: import_v417.z.string(),
2790
- created_at: import_v417.z.number(),
2791
- error: import_v417.z.object({
2792
- code: import_v417.z.string(),
2793
- message: import_v417.z.string()
2843
+ import_v418.z.object({
2844
+ id: import_v418.z.string(),
2845
+ created_at: import_v418.z.number(),
2846
+ error: import_v418.z.object({
2847
+ code: import_v418.z.string(),
2848
+ message: import_v418.z.string()
2794
2849
  }).nullish(),
2795
- model: import_v417.z.string(),
2796
- output: import_v417.z.array(
2797
- import_v417.z.discriminatedUnion("type", [
2798
- import_v417.z.object({
2799
- type: import_v417.z.literal("message"),
2800
- role: import_v417.z.literal("assistant"),
2801
- id: import_v417.z.string(),
2802
- content: import_v417.z.array(
2803
- import_v417.z.object({
2804
- type: import_v417.z.literal("output_text"),
2805
- text: import_v417.z.string(),
2850
+ model: import_v418.z.string(),
2851
+ output: import_v418.z.array(
2852
+ import_v418.z.discriminatedUnion("type", [
2853
+ import_v418.z.object({
2854
+ type: import_v418.z.literal("message"),
2855
+ role: import_v418.z.literal("assistant"),
2856
+ id: import_v418.z.string(),
2857
+ content: import_v418.z.array(
2858
+ import_v418.z.object({
2859
+ type: import_v418.z.literal("output_text"),
2860
+ text: import_v418.z.string(),
2806
2861
  logprobs: LOGPROBS_SCHEMA.nullish(),
2807
- annotations: import_v417.z.array(
2808
- import_v417.z.discriminatedUnion("type", [
2809
- import_v417.z.object({
2810
- type: import_v417.z.literal("url_citation"),
2811
- start_index: import_v417.z.number(),
2812
- end_index: import_v417.z.number(),
2813
- url: import_v417.z.string(),
2814
- title: import_v417.z.string()
2862
+ annotations: import_v418.z.array(
2863
+ import_v418.z.discriminatedUnion("type", [
2864
+ import_v418.z.object({
2865
+ type: import_v418.z.literal("url_citation"),
2866
+ start_index: import_v418.z.number(),
2867
+ end_index: import_v418.z.number(),
2868
+ url: import_v418.z.string(),
2869
+ title: import_v418.z.string()
2815
2870
  }),
2816
- import_v417.z.object({
2817
- type: import_v417.z.literal("file_citation"),
2818
- file_id: import_v417.z.string(),
2819
- filename: import_v417.z.string().nullish(),
2820
- index: import_v417.z.number().nullish(),
2821
- start_index: import_v417.z.number().nullish(),
2822
- end_index: import_v417.z.number().nullish(),
2823
- quote: import_v417.z.string().nullish()
2871
+ import_v418.z.object({
2872
+ type: import_v418.z.literal("file_citation"),
2873
+ file_id: import_v418.z.string(),
2874
+ filename: import_v418.z.string().nullish(),
2875
+ index: import_v418.z.number().nullish(),
2876
+ start_index: import_v418.z.number().nullish(),
2877
+ end_index: import_v418.z.number().nullish(),
2878
+ quote: import_v418.z.string().nullish()
2824
2879
  }),
2825
- import_v417.z.object({
2826
- type: import_v417.z.literal("container_file_citation")
2880
+ import_v418.z.object({
2881
+ type: import_v418.z.literal("container_file_citation")
2827
2882
  })
2828
2883
  ])
2829
2884
  )
@@ -2831,50 +2886,51 @@ var OpenAIResponsesLanguageModel = class {
2831
2886
  )
2832
2887
  }),
2833
2888
  codeInterpreterCallItem,
2834
- import_v417.z.object({
2835
- type: import_v417.z.literal("function_call"),
2836
- call_id: import_v417.z.string(),
2837
- name: import_v417.z.string(),
2838
- arguments: import_v417.z.string(),
2839
- id: import_v417.z.string()
2889
+ imageGenerationCallItem,
2890
+ import_v418.z.object({
2891
+ type: import_v418.z.literal("function_call"),
2892
+ call_id: import_v418.z.string(),
2893
+ name: import_v418.z.string(),
2894
+ arguments: import_v418.z.string(),
2895
+ id: import_v418.z.string()
2840
2896
  }),
2841
2897
  webSearchCallItem,
2842
- import_v417.z.object({
2843
- type: import_v417.z.literal("computer_call"),
2844
- id: import_v417.z.string(),
2845
- status: import_v417.z.string().optional()
2898
+ import_v418.z.object({
2899
+ type: import_v418.z.literal("computer_call"),
2900
+ id: import_v418.z.string(),
2901
+ status: import_v418.z.string().optional()
2846
2902
  }),
2847
- import_v417.z.object({
2848
- type: import_v417.z.literal("file_search_call"),
2849
- id: import_v417.z.string(),
2850
- status: import_v417.z.string().optional(),
2851
- queries: import_v417.z.array(import_v417.z.string()).nullish(),
2852
- results: import_v417.z.array(
2853
- import_v417.z.object({
2854
- attributes: import_v417.z.object({
2855
- file_id: import_v417.z.string(),
2856
- filename: import_v417.z.string(),
2857
- score: import_v417.z.number(),
2858
- text: import_v417.z.string()
2903
+ import_v418.z.object({
2904
+ type: import_v418.z.literal("file_search_call"),
2905
+ id: import_v418.z.string(),
2906
+ status: import_v418.z.string().optional(),
2907
+ queries: import_v418.z.array(import_v418.z.string()).nullish(),
2908
+ results: import_v418.z.array(
2909
+ import_v418.z.object({
2910
+ attributes: import_v418.z.object({
2911
+ file_id: import_v418.z.string(),
2912
+ filename: import_v418.z.string(),
2913
+ score: import_v418.z.number(),
2914
+ text: import_v418.z.string()
2859
2915
  })
2860
2916
  })
2861
2917
  ).nullish()
2862
2918
  }),
2863
- import_v417.z.object({
2864
- type: import_v417.z.literal("reasoning"),
2865
- id: import_v417.z.string(),
2866
- encrypted_content: import_v417.z.string().nullish(),
2867
- summary: import_v417.z.array(
2868
- import_v417.z.object({
2869
- type: import_v417.z.literal("summary_text"),
2870
- text: import_v417.z.string()
2919
+ import_v418.z.object({
2920
+ type: import_v418.z.literal("reasoning"),
2921
+ id: import_v418.z.string(),
2922
+ encrypted_content: import_v418.z.string().nullish(),
2923
+ summary: import_v418.z.array(
2924
+ import_v418.z.object({
2925
+ type: import_v418.z.literal("summary_text"),
2926
+ text: import_v418.z.string()
2871
2927
  })
2872
2928
  )
2873
2929
  })
2874
2930
  ])
2875
2931
  ),
2876
- service_tier: import_v417.z.string().nullish(),
2877
- incomplete_details: import_v417.z.object({ reason: import_v417.z.string() }).nullable(),
2932
+ service_tier: import_v418.z.string().nullish(),
2933
+ incomplete_details: import_v418.z.object({ reason: import_v418.z.string() }).nullable(),
2878
2934
  usage: usageSchema2
2879
2935
  })
2880
2936
  ),
@@ -2915,6 +2971,25 @@ var OpenAIResponsesLanguageModel = class {
2915
2971
  }
2916
2972
  break;
2917
2973
  }
2974
+ case "image_generation_call": {
2975
+ content.push({
2976
+ type: "tool-call",
2977
+ toolCallId: part.id,
2978
+ toolName: "image_generation",
2979
+ input: "{}",
2980
+ providerExecuted: true
2981
+ });
2982
+ content.push({
2983
+ type: "tool-result",
2984
+ toolCallId: part.id,
2985
+ toolName: "image_generation",
2986
+ result: {
2987
+ result: part.result
2988
+ },
2989
+ providerExecuted: true
2990
+ });
2991
+ break;
2992
+ }
2918
2993
  case "message": {
2919
2994
  for (const contentPart of part.content) {
2920
2995
  if (((_c = (_b = options.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.logprobs) && contentPart.logprobs) {
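In doGenerate, an image_generation_call output item is now surfaced as a provider-executed tool-call plus a tool-result whose payload is { result: <base64 image> } per imageGenerationOutputSchema. A rough sketch of picking that up from a generateText result; exact property names on the public result type may differ from the provider-level parts shown above:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai.responses('gpt-4.1'),
  prompt: 'Draw a watercolor lighthouse at dusk.',
  tools: { image_generation: openai.tools.imageGeneration({}) }, // assumed export, see above
});

for (const part of result.content) {
  if (part.type === 'tool-result' && part.toolName === 'image_generation') {
    // Provider-level payload is { result: string } with base64 image data;
    // depending on the AI SDK core version it may surface as `output` or `result`.
    console.log('image generation finished for call', part.toolCallId);
  }
}
```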
@@ -3178,6 +3253,14 @@ var OpenAIResponsesLanguageModel = class {
3178
3253
  id: value.item.id,
3179
3254
  toolName: "file_search"
3180
3255
  });
3256
+ } else if (value.item.type === "image_generation_call") {
3257
+ controller.enqueue({
3258
+ type: "tool-call",
3259
+ toolCallId: value.item.id,
3260
+ toolName: "image_generation",
3261
+ input: "{}",
3262
+ providerExecuted: true
3263
+ });
3181
3264
  } else if (value.item.type === "message") {
3182
3265
  controller.enqueue({
3183
3266
  type: "text-start",
@@ -3311,6 +3394,16 @@ var OpenAIResponsesLanguageModel = class {
3311
3394
  },
3312
3395
  providerExecuted: true
3313
3396
  });
3397
+ } else if (value.item.type === "image_generation_call") {
3398
+ controller.enqueue({
3399
+ type: "tool-result",
3400
+ toolCallId: value.item.id,
3401
+ toolName: "image_generation",
3402
+ result: {
3403
+ result: value.item.result
3404
+ },
3405
+ providerExecuted: true
3406
+ });
3314
3407
  } else if (value.item.type === "message") {
3315
3408
  controller.enqueue({
3316
3409
  type: "text-end",
@@ -3447,177 +3540,182 @@ var OpenAIResponsesLanguageModel = class {
3447
3540
  };
3448
3541
  }
3449
3542
  };
3450
- var usageSchema2 = import_v417.z.object({
3451
- input_tokens: import_v417.z.number(),
3452
- input_tokens_details: import_v417.z.object({ cached_tokens: import_v417.z.number().nullish() }).nullish(),
3453
- output_tokens: import_v417.z.number(),
3454
- output_tokens_details: import_v417.z.object({ reasoning_tokens: import_v417.z.number().nullish() }).nullish()
3543
+ var usageSchema2 = import_v418.z.object({
3544
+ input_tokens: import_v418.z.number(),
3545
+ input_tokens_details: import_v418.z.object({ cached_tokens: import_v418.z.number().nullish() }).nullish(),
3546
+ output_tokens: import_v418.z.number(),
3547
+ output_tokens_details: import_v418.z.object({ reasoning_tokens: import_v418.z.number().nullish() }).nullish()
3455
3548
  });
3456
- var textDeltaChunkSchema = import_v417.z.object({
3457
- type: import_v417.z.literal("response.output_text.delta"),
3458
- item_id: import_v417.z.string(),
3459
- delta: import_v417.z.string(),
3549
+ var textDeltaChunkSchema = import_v418.z.object({
3550
+ type: import_v418.z.literal("response.output_text.delta"),
3551
+ item_id: import_v418.z.string(),
3552
+ delta: import_v418.z.string(),
3460
3553
  logprobs: LOGPROBS_SCHEMA.nullish()
3461
3554
  });
3462
- var errorChunkSchema = import_v417.z.object({
3463
- type: import_v417.z.literal("error"),
3464
- code: import_v417.z.string(),
3465
- message: import_v417.z.string(),
3466
- param: import_v417.z.string().nullish(),
3467
- sequence_number: import_v417.z.number()
3555
+ var errorChunkSchema = import_v418.z.object({
3556
+ type: import_v418.z.literal("error"),
3557
+ code: import_v418.z.string(),
3558
+ message: import_v418.z.string(),
3559
+ param: import_v418.z.string().nullish(),
3560
+ sequence_number: import_v418.z.number()
3468
3561
  });
3469
- var responseFinishedChunkSchema = import_v417.z.object({
3470
- type: import_v417.z.enum(["response.completed", "response.incomplete"]),
3471
- response: import_v417.z.object({
3472
- incomplete_details: import_v417.z.object({ reason: import_v417.z.string() }).nullish(),
3562
+ var responseFinishedChunkSchema = import_v418.z.object({
3563
+ type: import_v418.z.enum(["response.completed", "response.incomplete"]),
3564
+ response: import_v418.z.object({
3565
+ incomplete_details: import_v418.z.object({ reason: import_v418.z.string() }).nullish(),
3473
3566
  usage: usageSchema2,
3474
- service_tier: import_v417.z.string().nullish()
3567
+ service_tier: import_v418.z.string().nullish()
3475
3568
  })
3476
3569
  });
3477
- var responseCreatedChunkSchema = import_v417.z.object({
3478
- type: import_v417.z.literal("response.created"),
3479
- response: import_v417.z.object({
3480
- id: import_v417.z.string(),
3481
- created_at: import_v417.z.number(),
3482
- model: import_v417.z.string(),
3483
- service_tier: import_v417.z.string().nullish()
3570
+ var responseCreatedChunkSchema = import_v418.z.object({
3571
+ type: import_v418.z.literal("response.created"),
3572
+ response: import_v418.z.object({
3573
+ id: import_v418.z.string(),
3574
+ created_at: import_v418.z.number(),
3575
+ model: import_v418.z.string(),
3576
+ service_tier: import_v418.z.string().nullish()
3484
3577
  })
3485
3578
  });
3486
- var responseOutputItemAddedSchema = import_v417.z.object({
3487
- type: import_v417.z.literal("response.output_item.added"),
3488
- output_index: import_v417.z.number(),
3489
- item: import_v417.z.discriminatedUnion("type", [
3490
- import_v417.z.object({
3491
- type: import_v417.z.literal("message"),
3492
- id: import_v417.z.string()
3579
+ var responseOutputItemAddedSchema = import_v418.z.object({
3580
+ type: import_v418.z.literal("response.output_item.added"),
3581
+ output_index: import_v418.z.number(),
3582
+ item: import_v418.z.discriminatedUnion("type", [
3583
+ import_v418.z.object({
3584
+ type: import_v418.z.literal("message"),
3585
+ id: import_v418.z.string()
3493
3586
  }),
3494
- import_v417.z.object({
3495
- type: import_v417.z.literal("reasoning"),
3496
- id: import_v417.z.string(),
3497
- encrypted_content: import_v417.z.string().nullish()
3587
+ import_v418.z.object({
3588
+ type: import_v418.z.literal("reasoning"),
3589
+ id: import_v418.z.string(),
3590
+ encrypted_content: import_v418.z.string().nullish()
3498
3591
  }),
3499
- import_v417.z.object({
3500
- type: import_v417.z.literal("function_call"),
3501
- id: import_v417.z.string(),
3502
- call_id: import_v417.z.string(),
3503
- name: import_v417.z.string(),
3504
- arguments: import_v417.z.string()
3592
+ import_v418.z.object({
3593
+ type: import_v418.z.literal("function_call"),
3594
+ id: import_v418.z.string(),
3595
+ call_id: import_v418.z.string(),
3596
+ name: import_v418.z.string(),
3597
+ arguments: import_v418.z.string()
3505
3598
  }),
3506
- import_v417.z.object({
3507
- type: import_v417.z.literal("web_search_call"),
3508
- id: import_v417.z.string(),
3509
- status: import_v417.z.string(),
3510
- action: import_v417.z.object({
3511
- type: import_v417.z.literal("search"),
3512
- query: import_v417.z.string().optional()
3599
+ import_v418.z.object({
3600
+ type: import_v418.z.literal("web_search_call"),
3601
+ id: import_v418.z.string(),
3602
+ status: import_v418.z.string(),
3603
+ action: import_v418.z.object({
3604
+ type: import_v418.z.literal("search"),
3605
+ query: import_v418.z.string().optional()
3513
3606
  }).nullish()
3514
3607
  }),
3515
- import_v417.z.object({
3516
- type: import_v417.z.literal("computer_call"),
3517
- id: import_v417.z.string(),
3518
- status: import_v417.z.string()
3608
+ import_v418.z.object({
3609
+ type: import_v418.z.literal("computer_call"),
3610
+ id: import_v418.z.string(),
3611
+ status: import_v418.z.string()
3519
3612
  }),
3520
- import_v417.z.object({
3521
- type: import_v417.z.literal("file_search_call"),
3522
- id: import_v417.z.string(),
3523
- status: import_v417.z.string(),
3524
- queries: import_v417.z.array(import_v417.z.string()).nullish(),
3525
- results: import_v417.z.array(
3526
- import_v417.z.object({
3527
- attributes: import_v417.z.object({
3528
- file_id: import_v417.z.string(),
3529
- filename: import_v417.z.string(),
3530
- score: import_v417.z.number(),
3531
- text: import_v417.z.string()
3613
+ import_v418.z.object({
3614
+ type: import_v418.z.literal("file_search_call"),
3615
+ id: import_v418.z.string(),
3616
+ status: import_v418.z.string(),
3617
+ queries: import_v418.z.array(import_v418.z.string()).nullish(),
3618
+ results: import_v418.z.array(
3619
+ import_v418.z.object({
3620
+ attributes: import_v418.z.object({
3621
+ file_id: import_v418.z.string(),
3622
+ filename: import_v418.z.string(),
3623
+ score: import_v418.z.number(),
3624
+ text: import_v418.z.string()
3532
3625
  })
3533
3626
  })
3534
3627
  ).optional()
3628
+ }),
3629
+ import_v418.z.object({
3630
+ type: import_v418.z.literal("image_generation_call"),
3631
+ id: import_v418.z.string()
3535
3632
  })
3536
3633
  ])
3537
3634
  });
3538
- var responseOutputItemDoneSchema = import_v417.z.object({
3539
- type: import_v417.z.literal("response.output_item.done"),
3540
- output_index: import_v417.z.number(),
3541
- item: import_v417.z.discriminatedUnion("type", [
3542
- import_v417.z.object({
3543
- type: import_v417.z.literal("message"),
3544
- id: import_v417.z.string()
3635
+ var responseOutputItemDoneSchema = import_v418.z.object({
3636
+ type: import_v418.z.literal("response.output_item.done"),
3637
+ output_index: import_v418.z.number(),
3638
+ item: import_v418.z.discriminatedUnion("type", [
3639
+ import_v418.z.object({
3640
+ type: import_v418.z.literal("message"),
3641
+ id: import_v418.z.string()
3545
3642
  }),
3546
- import_v417.z.object({
3547
- type: import_v417.z.literal("reasoning"),
3548
- id: import_v417.z.string(),
3549
- encrypted_content: import_v417.z.string().nullish()
3643
+ import_v418.z.object({
3644
+ type: import_v418.z.literal("reasoning"),
3645
+ id: import_v418.z.string(),
3646
+ encrypted_content: import_v418.z.string().nullish()
3550
3647
  }),
3551
- import_v417.z.object({
3552
- type: import_v417.z.literal("function_call"),
3553
- id: import_v417.z.string(),
3554
- call_id: import_v417.z.string(),
3555
- name: import_v417.z.string(),
3556
- arguments: import_v417.z.string(),
3557
- status: import_v417.z.literal("completed")
3648
+ import_v418.z.object({
3649
+ type: import_v418.z.literal("function_call"),
3650
+ id: import_v418.z.string(),
3651
+ call_id: import_v418.z.string(),
3652
+ name: import_v418.z.string(),
3653
+ arguments: import_v418.z.string(),
3654
+ status: import_v418.z.literal("completed")
3558
3655
  }),
3559
3656
  codeInterpreterCallItem,
3657
+ imageGenerationCallItem,
3560
3658
  webSearchCallItem,
3561
- import_v417.z.object({
3562
- type: import_v417.z.literal("computer_call"),
3563
- id: import_v417.z.string(),
3564
- status: import_v417.z.literal("completed")
3659
+ import_v418.z.object({
3660
+ type: import_v418.z.literal("computer_call"),
3661
+ id: import_v418.z.string(),
3662
+ status: import_v418.z.literal("completed")
3565
3663
  }),
3566
- import_v417.z.object({
3567
- type: import_v417.z.literal("file_search_call"),
3568
- id: import_v417.z.string(),
3569
- status: import_v417.z.literal("completed"),
3570
- queries: import_v417.z.array(import_v417.z.string()).nullish(),
3571
- results: import_v417.z.array(
3572
- import_v417.z.object({
3573
- attributes: import_v417.z.object({
3574
- file_id: import_v417.z.string(),
3575
- filename: import_v417.z.string(),
3576
- score: import_v417.z.number(),
3577
- text: import_v417.z.string()
3664
+ import_v418.z.object({
3665
+ type: import_v418.z.literal("file_search_call"),
3666
+ id: import_v418.z.string(),
3667
+ status: import_v418.z.literal("completed"),
3668
+ queries: import_v418.z.array(import_v418.z.string()).nullish(),
3669
+ results: import_v418.z.array(
3670
+ import_v418.z.object({
3671
+ attributes: import_v418.z.object({
3672
+ file_id: import_v418.z.string(),
3673
+ filename: import_v418.z.string(),
3674
+ score: import_v418.z.number(),
3675
+ text: import_v418.z.string()
3578
3676
  })
3579
3677
  })
3580
3678
  ).nullish()
3581
3679
  })
3582
3680
  ])
3583
3681
  });
3584
- var responseFunctionCallArgumentsDeltaSchema = import_v417.z.object({
3585
- type: import_v417.z.literal("response.function_call_arguments.delta"),
3586
- item_id: import_v417.z.string(),
3587
- output_index: import_v417.z.number(),
3588
- delta: import_v417.z.string()
3682
+ var responseFunctionCallArgumentsDeltaSchema = import_v418.z.object({
3683
+ type: import_v418.z.literal("response.function_call_arguments.delta"),
3684
+ item_id: import_v418.z.string(),
3685
+ output_index: import_v418.z.number(),
3686
+ delta: import_v418.z.string()
3589
3687
  });
3590
- var responseAnnotationAddedSchema = import_v417.z.object({
3591
- type: import_v417.z.literal("response.output_text.annotation.added"),
3592
- annotation: import_v417.z.discriminatedUnion("type", [
3593
- import_v417.z.object({
3594
- type: import_v417.z.literal("url_citation"),
3595
- url: import_v417.z.string(),
3596
- title: import_v417.z.string()
3688
+ var responseAnnotationAddedSchema = import_v418.z.object({
3689
+ type: import_v418.z.literal("response.output_text.annotation.added"),
3690
+ annotation: import_v418.z.discriminatedUnion("type", [
3691
+ import_v418.z.object({
3692
+ type: import_v418.z.literal("url_citation"),
3693
+ url: import_v418.z.string(),
3694
+ title: import_v418.z.string()
3597
3695
  }),
3598
- import_v417.z.object({
3599
- type: import_v417.z.literal("file_citation"),
3600
- file_id: import_v417.z.string(),
3601
- filename: import_v417.z.string().nullish(),
3602
- index: import_v417.z.number().nullish(),
3603
- start_index: import_v417.z.number().nullish(),
3604
- end_index: import_v417.z.number().nullish(),
3605
- quote: import_v417.z.string().nullish()
3696
+ import_v418.z.object({
3697
+ type: import_v418.z.literal("file_citation"),
3698
+ file_id: import_v418.z.string(),
3699
+ filename: import_v418.z.string().nullish(),
3700
+ index: import_v418.z.number().nullish(),
3701
+ start_index: import_v418.z.number().nullish(),
3702
+ end_index: import_v418.z.number().nullish(),
3703
+ quote: import_v418.z.string().nullish()
3606
3704
  })
3607
3705
  ])
3608
3706
  });
3609
- var responseReasoningSummaryPartAddedSchema = import_v417.z.object({
3610
- type: import_v417.z.literal("response.reasoning_summary_part.added"),
3611
- item_id: import_v417.z.string(),
3612
- summary_index: import_v417.z.number()
3707
+ var responseReasoningSummaryPartAddedSchema = import_v418.z.object({
3708
+ type: import_v418.z.literal("response.reasoning_summary_part.added"),
3709
+ item_id: import_v418.z.string(),
3710
+ summary_index: import_v418.z.number()
3613
3711
  });
3614
- var responseReasoningSummaryTextDeltaSchema = import_v417.z.object({
3615
- type: import_v417.z.literal("response.reasoning_summary_text.delta"),
3616
- item_id: import_v417.z.string(),
3617
- summary_index: import_v417.z.number(),
3618
- delta: import_v417.z.string()
3712
+ var responseReasoningSummaryTextDeltaSchema = import_v418.z.object({
3713
+ type: import_v418.z.literal("response.reasoning_summary_text.delta"),
3714
+ item_id: import_v418.z.string(),
3715
+ summary_index: import_v418.z.number(),
3716
+ delta: import_v418.z.string()
3619
3717
  });
3620
- var openaiResponsesChunkSchema = import_v417.z.union([
3718
+ var openaiResponsesChunkSchema = import_v418.z.union([
3621
3719
  textDeltaChunkSchema,
3622
3720
  responseFinishedChunkSchema,
3623
3721
  responseCreatedChunkSchema,
@@ -3628,7 +3726,7 @@ var openaiResponsesChunkSchema = import_v417.z.union([
3628
3726
  responseReasoningSummaryPartAddedSchema,
3629
3727
  responseReasoningSummaryTextDeltaSchema,
3630
3728
  errorChunkSchema,
3631
- import_v417.z.object({ type: import_v417.z.string() }).loose()
3729
+ import_v418.z.object({ type: import_v418.z.string() }).loose()
3632
3730
  // fallback for unknown chunks
3633
3731
  ]);
3634
3732
  function isTextDeltaChunk(chunk) {
@@ -3701,27 +3799,15 @@ function getResponsesModelConfig(modelId) {
3701
3799
  isReasoningModel: false
3702
3800
  };
3703
3801
  }
3704
- var openaiResponsesProviderOptionsSchema = import_v417.z.object({
3705
- metadata: import_v417.z.any().nullish(),
3706
- parallelToolCalls: import_v417.z.boolean().nullish(),
3707
- previousResponseId: import_v417.z.string().nullish(),
3708
- store: import_v417.z.boolean().nullish(),
3709
- user: import_v417.z.string().nullish(),
3710
- reasoningEffort: import_v417.z.string().nullish(),
3711
- strictJsonSchema: import_v417.z.boolean().nullish(),
3712
- instructions: import_v417.z.string().nullish(),
3713
- reasoningSummary: import_v417.z.string().nullish(),
3714
- serviceTier: import_v417.z.enum(["auto", "flex", "priority"]).nullish(),
3715
- include: import_v417.z.array(
3716
- import_v417.z.enum([
3802
+ var openaiResponsesProviderOptionsSchema = import_v418.z.object({
3803
+ include: import_v418.z.array(
3804
+ import_v418.z.enum([
3717
3805
  "reasoning.encrypted_content",
3718
3806
  "file_search_call.results",
3719
3807
  "message.output_text.logprobs"
3720
3808
  ])
3721
3809
  ).nullish(),
3722
- textVerbosity: import_v417.z.enum(["low", "medium", "high"]).nullish(),
3723
- promptCacheKey: import_v417.z.string().nullish(),
3724
- safetyIdentifier: import_v417.z.string().nullish(),
3810
+ instructions: import_v418.z.string().nullish(),
3725
3811
  /**
3726
3812
  * Return the log probabilities of the tokens.
3727
3813
  *
@@ -3734,7 +3820,25 @@ var openaiResponsesProviderOptionsSchema = import_v417.z.object({
3734
3820
  * @see https://platform.openai.com/docs/api-reference/responses/create
3735
3821
  * @see https://cookbook.openai.com/examples/using_logprobs
3736
3822
  */
3737
- logprobs: import_v417.z.union([import_v417.z.boolean(), import_v417.z.number().min(1).max(TOP_LOGPROBS_MAX)]).optional()
3823
+ logprobs: import_v418.z.union([import_v418.z.boolean(), import_v418.z.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
3824
+ /**
3825
+ * The maximum number of total calls to built-in tools that can be processed in a response.
3826
+ * This maximum number applies across all built-in tool calls, not per individual tool.
3827
+ * Any further attempts to call a tool by the model will be ignored.
3828
+ */
3829
+ maxToolCalls: import_v418.z.number().nullish(),
3830
+ metadata: import_v418.z.any().nullish(),
3831
+ parallelToolCalls: import_v418.z.boolean().nullish(),
3832
+ previousResponseId: import_v418.z.string().nullish(),
3833
+ promptCacheKey: import_v418.z.string().nullish(),
3834
+ reasoningEffort: import_v418.z.string().nullish(),
3835
+ reasoningSummary: import_v418.z.string().nullish(),
3836
+ safetyIdentifier: import_v418.z.string().nullish(),
3837
+ serviceTier: import_v418.z.enum(["auto", "flex", "priority"]).nullish(),
3838
+ store: import_v418.z.boolean().nullish(),
3839
+ strictJsonSchema: import_v418.z.boolean().nullish(),
3840
+ textVerbosity: import_v418.z.enum(["low", "medium", "high"]).nullish(),
3841
+ user: import_v418.z.string().nullish()
3738
3842
  });
3739
3843
  // Annotate the CommonJS export names for ESM import in node:
3740
3844
  0 && (module.exports = {