@ai-sdk/openai 2.0.30 → 2.0.31

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2155,7 +2155,7 @@ import {
  parseProviderOptions as parseProviderOptions7,
  postJsonToApi as postJsonToApi6
  } from "@ai-sdk/provider-utils";
- import { z as z17 } from "zod/v4";
+ import { z as z18 } from "zod/v4";

  // src/responses/convert-to-openai-responses-input.ts
  import {
@@ -2170,7 +2170,8 @@ function isFileId(data, prefixes) {
  async function convertToOpenAIResponsesInput({
  prompt,
  systemMessageMode,
- fileIdPrefixes
+ fileIdPrefixes,
+ store
  }) {
  var _a, _b, _c, _d, _e, _f;
  const input = [];
@@ -2275,10 +2276,14 @@ async function convertToOpenAIResponsesInput({
  break;
  }
  case "tool-result": {
- warnings.push({
- type: "other",
- message: `tool result parts in assistant messages are not supported for OpenAI responses`
- });
+ if (store) {
+ input.push({ type: "item_reference", id: part.toolCallId });
+ } else {
+ warnings.push({
+ type: "other",
+ message: `Results for OpenAI tool ${part.toolName} are not sent to the API when store is false`
+ });
+ }
  break;
  }
  case "reasoning": {
@@ -2449,6 +2454,33 @@ var webSearchToolFactory = createProviderDefinedToolFactory3({
  })
  });

+ // src/tool/image-generation.ts
+ import { createProviderDefinedToolFactoryWithOutputSchema as createProviderDefinedToolFactoryWithOutputSchema2 } from "@ai-sdk/provider-utils";
+ import { z as z17 } from "zod/v4";
+ var imageGenerationArgsSchema = z17.object({
+ background: z17.enum(["auto", "opaque", "transparent"]).optional(),
+ inputFidelity: z17.enum(["low", "high"]).optional(),
+ inputImageMask: z17.object({
+ fileId: z17.string().optional(),
+ imageUrl: z17.string().optional()
+ }).optional(),
+ model: z17.string().optional(),
+ moderation: z17.enum(["auto"]).optional(),
+ outputCompression: z17.number().int().min(0).max(100).optional(),
+ outputFormat: z17.enum(["png", "jpeg", "webp"]).optional(),
+ quality: z17.enum(["auto", "low", "medium", "high"]).optional(),
+ size: z17.enum(["1024x1024", "1024x1536", "1536x1024", "auto"]).optional()
+ }).strict();
+ var imageGenerationOutputSchema = z17.object({
+ result: z17.string()
+ });
+ var imageGenerationToolFactory = createProviderDefinedToolFactoryWithOutputSchema2({
+ id: "openai.image_generation",
+ name: "image_generation",
+ inputSchema: z17.object({}),
+ outputSchema: imageGenerationOutputSchema
+ });
+
  // src/responses/openai-responses-prepare-tools.ts
  function prepareResponsesTools({
  tools,
@@ -2512,8 +2544,23 @@ function prepareResponsesTools({
  });
  break;
  }
- default: {
- toolWarnings.push({ type: "unsupported-tool", tool });
+ case "openai.image_generation": {
+ const args = imageGenerationArgsSchema.parse(tool.args);
+ openaiTools.push({
+ type: "image_generation",
+ background: args.background,
+ input_fidelity: args.inputFidelity,
+ input_image_mask: args.inputImageMask ? {
+ file_id: args.inputImageMask.fileId,
+ image_url: args.inputImageMask.imageUrl
+ } : void 0,
+ model: args.model,
+ size: args.size,
+ quality: args.quality,
+ moderation: args.moderation,
+ output_format: args.outputFormat,
+ output_compression: args.outputCompression
+ });
  break;
  }
  }
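
Usage sketch: the provider-defined image generation tool accepts the camelCase args validated by imageGenerationArgsSchema and maps them to the snake_case image_generation tool definition; toolChoice can also force it by name (see the next hunk). Assuming the factory is exported as openai.tools.imageGeneration, mirroring the other provider-defined tool factories:

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// Sketch: the accessor name and model id are assumptions; the arg names come
// from imageGenerationArgsSchema above.
const result = await generateText({
  model: openai.responses("gpt-4.1-mini"),
  prompt: "Generate a simple line-art logo of a coffee cup.",
  tools: {
    image_generation: openai.tools.imageGeneration({
      size: "1024x1024",
      quality: "low",
      outputFormat: "png",
    }),
  },
  // forcing the built-in tool by name works because "image_generation" is now
  // part of the tool-choice mapping
  toolChoice: { type: "tool", toolName: "image_generation" },
});
// the provider-executed tool result carries { result: <base64 image> } per
// imageGenerationOutputSchema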
@@ -2536,7 +2583,7 @@ function prepareResponsesTools({
  case "tool":
  return {
  tools: openaiTools,
- toolChoice: toolChoice.toolName === "code_interpreter" || toolChoice.toolName === "file_search" || toolChoice.toolName === "web_search_preview" || toolChoice.toolName === "web_search" ? { type: toolChoice.toolName } : { type: "function", name: toolChoice.toolName },
+ toolChoice: toolChoice.toolName === "code_interpreter" || toolChoice.toolName === "file_search" || toolChoice.toolName === "image_generation" || toolChoice.toolName === "web_search_preview" || toolChoice.toolName === "web_search" ? { type: toolChoice.toolName } : { type: "function", name: toolChoice.toolName },
  toolWarnings
  };
  default: {
@@ -2549,47 +2596,52 @@ function prepareResponsesTools({
  }

  // src/responses/openai-responses-language-model.ts
- var webSearchCallItem = z17.object({
- type: z17.literal("web_search_call"),
- id: z17.string(),
- status: z17.string(),
- action: z17.discriminatedUnion("type", [
- z17.object({
- type: z17.literal("search"),
- query: z17.string().nullish()
+ var webSearchCallItem = z18.object({
+ type: z18.literal("web_search_call"),
+ id: z18.string(),
+ status: z18.string(),
+ action: z18.discriminatedUnion("type", [
+ z18.object({
+ type: z18.literal("search"),
+ query: z18.string().nullish()
  }),
- z17.object({
- type: z17.literal("open_page"),
- url: z17.string()
+ z18.object({
+ type: z18.literal("open_page"),
+ url: z18.string()
  }),
- z17.object({
- type: z17.literal("find"),
- url: z17.string(),
- pattern: z17.string()
+ z18.object({
+ type: z18.literal("find"),
+ url: z18.string(),
+ pattern: z18.string()
  })
  ]).nullish()
  });
- var codeInterpreterCallItem = z17.object({
- type: z17.literal("code_interpreter_call"),
- id: z17.string(),
- code: z17.string().nullable(),
- container_id: z17.string(),
- outputs: z17.array(
- z17.discriminatedUnion("type", [
- z17.object({ type: z17.literal("logs"), logs: z17.string() }),
- z17.object({ type: z17.literal("image"), url: z17.string() })
+ var codeInterpreterCallItem = z18.object({
+ type: z18.literal("code_interpreter_call"),
+ id: z18.string(),
+ code: z18.string().nullable(),
+ container_id: z18.string(),
+ outputs: z18.array(
+ z18.discriminatedUnion("type", [
+ z18.object({ type: z18.literal("logs"), logs: z18.string() }),
+ z18.object({ type: z18.literal("image"), url: z18.string() })
  ])
  ).nullable()
  });
+ var imageGenerationCallItem = z18.object({
+ type: z18.literal("image_generation_call"),
+ id: z18.string(),
+ result: z18.string()
+ });
  var TOP_LOGPROBS_MAX = 20;
- var LOGPROBS_SCHEMA = z17.array(
- z17.object({
- token: z17.string(),
- logprob: z17.number(),
- top_logprobs: z17.array(
- z17.object({
- token: z17.string(),
- logprob: z17.number()
+ var LOGPROBS_SCHEMA = z18.array(
+ z18.object({
+ token: z18.string(),
+ logprob: z18.number(),
+ top_logprobs: z18.array(
+ z18.object({
+ token: z18.string(),
+ logprob: z18.number()
  })
  )
  })
@@ -2622,7 +2674,7 @@ var OpenAIResponsesLanguageModel = class {
  toolChoice,
  responseFormat
  }) {
- var _a, _b, _c, _d;
+ var _a, _b, _c, _d, _e;
  const warnings = [];
  const modelConfig = getResponsesModelConfig(this.modelId);
  if (topK != null) {
@@ -2646,28 +2698,29 @@ var OpenAIResponsesLanguageModel = class {
  if (stopSequences != null) {
  warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
  }
- const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
- prompt,
- systemMessageMode: modelConfig.systemMessageMode,
- fileIdPrefixes: this.config.fileIdPrefixes
- });
- warnings.push(...inputWarnings);
  const openaiOptions = await parseProviderOptions7({
  provider: "openai",
  providerOptions,
  schema: openaiResponsesProviderOptionsSchema
  });
- const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
+ const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
+ prompt,
+ systemMessageMode: modelConfig.systemMessageMode,
+ fileIdPrefixes: this.config.fileIdPrefixes,
+ store: (_a = openaiOptions == null ? void 0 : openaiOptions.store) != null ? _a : true
+ });
+ warnings.push(...inputWarnings);
+ const strictJsonSchema = (_b = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _b : false;
  let include = openaiOptions == null ? void 0 : openaiOptions.include;
  const topLogprobs = typeof (openaiOptions == null ? void 0 : openaiOptions.logprobs) === "number" ? openaiOptions == null ? void 0 : openaiOptions.logprobs : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? TOP_LOGPROBS_MAX : void 0;
  include = topLogprobs ? Array.isArray(include) ? [...include, "message.output_text.logprobs"] : ["message.output_text.logprobs"] : include;
- const webSearchToolName = (_b = tools == null ? void 0 : tools.find(
+ const webSearchToolName = (_c = tools == null ? void 0 : tools.find(
  (tool) => tool.type === "provider-defined" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview")
- )) == null ? void 0 : _b.name;
+ )) == null ? void 0 : _c.name;
  include = webSearchToolName ? Array.isArray(include) ? [...include, "web_search_call.action.sources"] : ["web_search_call.action.sources"] : include;
- const codeInterpreterToolName = (_c = tools == null ? void 0 : tools.find(
+ const codeInterpreterToolName = (_d = tools == null ? void 0 : tools.find(
  (tool) => tool.type === "provider-defined" && tool.id === "openai.code_interpreter"
- )) == null ? void 0 : _c.name;
+ )) == null ? void 0 : _d.name;
  include = codeInterpreterToolName ? Array.isArray(include) ? [...include, "code_interpreter_call.outputs"] : ["code_interpreter_call.outputs"] : include;
  const baseArgs = {
  model: this.modelId,
@@ -2681,7 +2734,7 @@ var OpenAIResponsesLanguageModel = class {
  format: responseFormat.schema != null ? {
  type: "json_schema",
  strict: strictJsonSchema,
- name: (_d = responseFormat.name) != null ? _d : "response",
+ name: (_e = responseFormat.name) != null ? _e : "response",
  description: responseFormat.description,
  schema: responseFormat.schema
  } : { type: "json_object" }
@@ -2692,6 +2745,7 @@ var OpenAIResponsesLanguageModel = class {
  }
  },
  // provider options:
+ max_tool_calls: openaiOptions == null ? void 0 : openaiOptions.maxToolCalls,
  metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
  parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
  previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
@@ -2807,45 +2861,45 @@ var OpenAIResponsesLanguageModel = class {
  body,
  failedResponseHandler: openaiFailedResponseHandler,
  successfulResponseHandler: createJsonResponseHandler6(
- z17.object({
- id: z17.string(),
- created_at: z17.number(),
- error: z17.object({
- code: z17.string(),
- message: z17.string()
+ z18.object({
+ id: z18.string(),
+ created_at: z18.number(),
+ error: z18.object({
+ code: z18.string(),
+ message: z18.string()
  }).nullish(),
- model: z17.string(),
- output: z17.array(
- z17.discriminatedUnion("type", [
- z17.object({
- type: z17.literal("message"),
- role: z17.literal("assistant"),
- id: z17.string(),
- content: z17.array(
- z17.object({
- type: z17.literal("output_text"),
- text: z17.string(),
+ model: z18.string(),
+ output: z18.array(
+ z18.discriminatedUnion("type", [
+ z18.object({
+ type: z18.literal("message"),
+ role: z18.literal("assistant"),
+ id: z18.string(),
+ content: z18.array(
+ z18.object({
+ type: z18.literal("output_text"),
+ text: z18.string(),
  logprobs: LOGPROBS_SCHEMA.nullish(),
- annotations: z17.array(
- z17.discriminatedUnion("type", [
- z17.object({
- type: z17.literal("url_citation"),
- start_index: z17.number(),
- end_index: z17.number(),
- url: z17.string(),
- title: z17.string()
+ annotations: z18.array(
+ z18.discriminatedUnion("type", [
+ z18.object({
+ type: z18.literal("url_citation"),
+ start_index: z18.number(),
+ end_index: z18.number(),
+ url: z18.string(),
+ title: z18.string()
  }),
- z17.object({
- type: z17.literal("file_citation"),
- file_id: z17.string(),
- filename: z17.string().nullish(),
- index: z17.number().nullish(),
- start_index: z17.number().nullish(),
- end_index: z17.number().nullish(),
- quote: z17.string().nullish()
+ z18.object({
+ type: z18.literal("file_citation"),
+ file_id: z18.string(),
+ filename: z18.string().nullish(),
+ index: z18.number().nullish(),
+ start_index: z18.number().nullish(),
+ end_index: z18.number().nullish(),
+ quote: z18.string().nullish()
  }),
- z17.object({
- type: z17.literal("container_file_citation")
+ z18.object({
+ type: z18.literal("container_file_citation")
  })
  ])
  )
@@ -2853,50 +2907,51 @@ var OpenAIResponsesLanguageModel = class {
  )
  }),
  codeInterpreterCallItem,
- z17.object({
- type: z17.literal("function_call"),
- call_id: z17.string(),
- name: z17.string(),
- arguments: z17.string(),
- id: z17.string()
+ imageGenerationCallItem,
+ z18.object({
+ type: z18.literal("function_call"),
+ call_id: z18.string(),
+ name: z18.string(),
+ arguments: z18.string(),
+ id: z18.string()
  }),
  webSearchCallItem,
- z17.object({
- type: z17.literal("computer_call"),
- id: z17.string(),
- status: z17.string().optional()
+ z18.object({
+ type: z18.literal("computer_call"),
+ id: z18.string(),
+ status: z18.string().optional()
  }),
- z17.object({
- type: z17.literal("file_search_call"),
- id: z17.string(),
- status: z17.string().optional(),
- queries: z17.array(z17.string()).nullish(),
- results: z17.array(
- z17.object({
- attributes: z17.object({
- file_id: z17.string(),
- filename: z17.string(),
- score: z17.number(),
- text: z17.string()
+ z18.object({
+ type: z18.literal("file_search_call"),
+ id: z18.string(),
+ status: z18.string().optional(),
+ queries: z18.array(z18.string()).nullish(),
+ results: z18.array(
+ z18.object({
+ attributes: z18.object({
+ file_id: z18.string(),
+ filename: z18.string(),
+ score: z18.number(),
+ text: z18.string()
  })
  })
  ).nullish()
  }),
- z17.object({
- type: z17.literal("reasoning"),
- id: z17.string(),
- encrypted_content: z17.string().nullish(),
- summary: z17.array(
- z17.object({
- type: z17.literal("summary_text"),
- text: z17.string()
+ z18.object({
+ type: z18.literal("reasoning"),
+ id: z18.string(),
+ encrypted_content: z18.string().nullish(),
+ summary: z18.array(
+ z18.object({
+ type: z18.literal("summary_text"),
+ text: z18.string()
  })
  )
  })
  ])
  ),
- service_tier: z17.string().nullish(),
- incomplete_details: z17.object({ reason: z17.string() }).nullable(),
+ service_tier: z18.string().nullish(),
+ incomplete_details: z18.object({ reason: z18.string() }).nullable(),
  usage: usageSchema2
  })
  ),
@@ -2937,6 +2992,25 @@ var OpenAIResponsesLanguageModel = class {
  }
  break;
  }
+ case "image_generation_call": {
+ content.push({
+ type: "tool-call",
+ toolCallId: part.id,
+ toolName: "image_generation",
+ input: "{}",
+ providerExecuted: true
+ });
+ content.push({
+ type: "tool-result",
+ toolCallId: part.id,
+ toolName: "image_generation",
+ result: {
+ result: part.result
+ },
+ providerExecuted: true
+ });
+ break;
+ }
  case "message": {
  for (const contentPart of part.content) {
  if (((_c = (_b = options.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.logprobs) && contentPart.logprobs) {
@@ -3200,6 +3274,14 @@ var OpenAIResponsesLanguageModel = class {
  id: value.item.id,
  toolName: "file_search"
  });
+ } else if (value.item.type === "image_generation_call") {
+ controller.enqueue({
+ type: "tool-call",
+ toolCallId: value.item.id,
+ toolName: "image_generation",
+ input: "{}",
+ providerExecuted: true
+ });
  } else if (value.item.type === "message") {
  controller.enqueue({
  type: "text-start",
@@ -3333,6 +3415,16 @@ var OpenAIResponsesLanguageModel = class {
  },
  providerExecuted: true
  });
+ } else if (value.item.type === "image_generation_call") {
+ controller.enqueue({
+ type: "tool-result",
+ toolCallId: value.item.id,
+ toolName: "image_generation",
+ result: {
+ result: value.item.result
+ },
+ providerExecuted: true
+ });
  } else if (value.item.type === "message") {
  controller.enqueue({
  type: "text-end",
@@ -3469,177 +3561,182 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  };
- var usageSchema2 = z17.object({
- input_tokens: z17.number(),
- input_tokens_details: z17.object({ cached_tokens: z17.number().nullish() }).nullish(),
- output_tokens: z17.number(),
- output_tokens_details: z17.object({ reasoning_tokens: z17.number().nullish() }).nullish()
+ var usageSchema2 = z18.object({
+ input_tokens: z18.number(),
+ input_tokens_details: z18.object({ cached_tokens: z18.number().nullish() }).nullish(),
+ output_tokens: z18.number(),
+ output_tokens_details: z18.object({ reasoning_tokens: z18.number().nullish() }).nullish()
  });
- var textDeltaChunkSchema = z17.object({
- type: z17.literal("response.output_text.delta"),
- item_id: z17.string(),
- delta: z17.string(),
+ var textDeltaChunkSchema = z18.object({
+ type: z18.literal("response.output_text.delta"),
+ item_id: z18.string(),
+ delta: z18.string(),
  logprobs: LOGPROBS_SCHEMA.nullish()
  });
- var errorChunkSchema = z17.object({
- type: z17.literal("error"),
- code: z17.string(),
- message: z17.string(),
- param: z17.string().nullish(),
- sequence_number: z17.number()
+ var errorChunkSchema = z18.object({
+ type: z18.literal("error"),
+ code: z18.string(),
+ message: z18.string(),
+ param: z18.string().nullish(),
+ sequence_number: z18.number()
  });
- var responseFinishedChunkSchema = z17.object({
- type: z17.enum(["response.completed", "response.incomplete"]),
- response: z17.object({
- incomplete_details: z17.object({ reason: z17.string() }).nullish(),
+ var responseFinishedChunkSchema = z18.object({
+ type: z18.enum(["response.completed", "response.incomplete"]),
+ response: z18.object({
+ incomplete_details: z18.object({ reason: z18.string() }).nullish(),
  usage: usageSchema2,
- service_tier: z17.string().nullish()
+ service_tier: z18.string().nullish()
  })
  });
- var responseCreatedChunkSchema = z17.object({
- type: z17.literal("response.created"),
- response: z17.object({
- id: z17.string(),
- created_at: z17.number(),
- model: z17.string(),
- service_tier: z17.string().nullish()
+ var responseCreatedChunkSchema = z18.object({
+ type: z18.literal("response.created"),
+ response: z18.object({
+ id: z18.string(),
+ created_at: z18.number(),
+ model: z18.string(),
+ service_tier: z18.string().nullish()
  })
  });
- var responseOutputItemAddedSchema = z17.object({
- type: z17.literal("response.output_item.added"),
- output_index: z17.number(),
- item: z17.discriminatedUnion("type", [
- z17.object({
- type: z17.literal("message"),
- id: z17.string()
+ var responseOutputItemAddedSchema = z18.object({
+ type: z18.literal("response.output_item.added"),
+ output_index: z18.number(),
+ item: z18.discriminatedUnion("type", [
+ z18.object({
+ type: z18.literal("message"),
+ id: z18.string()
  }),
- z17.object({
- type: z17.literal("reasoning"),
- id: z17.string(),
- encrypted_content: z17.string().nullish()
+ z18.object({
+ type: z18.literal("reasoning"),
+ id: z18.string(),
+ encrypted_content: z18.string().nullish()
  }),
- z17.object({
- type: z17.literal("function_call"),
- id: z17.string(),
- call_id: z17.string(),
- name: z17.string(),
- arguments: z17.string()
+ z18.object({
+ type: z18.literal("function_call"),
+ id: z18.string(),
+ call_id: z18.string(),
+ name: z18.string(),
+ arguments: z18.string()
  }),
- z17.object({
- type: z17.literal("web_search_call"),
- id: z17.string(),
- status: z17.string(),
- action: z17.object({
- type: z17.literal("search"),
- query: z17.string().optional()
+ z18.object({
+ type: z18.literal("web_search_call"),
+ id: z18.string(),
+ status: z18.string(),
+ action: z18.object({
+ type: z18.literal("search"),
+ query: z18.string().optional()
  }).nullish()
  }),
- z17.object({
- type: z17.literal("computer_call"),
- id: z17.string(),
- status: z17.string()
+ z18.object({
+ type: z18.literal("computer_call"),
+ id: z18.string(),
+ status: z18.string()
  }),
- z17.object({
- type: z17.literal("file_search_call"),
- id: z17.string(),
- status: z17.string(),
- queries: z17.array(z17.string()).nullish(),
- results: z17.array(
- z17.object({
- attributes: z17.object({
- file_id: z17.string(),
- filename: z17.string(),
- score: z17.number(),
- text: z17.string()
+ z18.object({
+ type: z18.literal("file_search_call"),
+ id: z18.string(),
+ status: z18.string(),
+ queries: z18.array(z18.string()).nullish(),
+ results: z18.array(
+ z18.object({
+ attributes: z18.object({
+ file_id: z18.string(),
+ filename: z18.string(),
+ score: z18.number(),
+ text: z18.string()
  })
  })
  ).optional()
+ }),
+ z18.object({
+ type: z18.literal("image_generation_call"),
+ id: z18.string()
  })
  ])
  });
- var responseOutputItemDoneSchema = z17.object({
- type: z17.literal("response.output_item.done"),
- output_index: z17.number(),
- item: z17.discriminatedUnion("type", [
- z17.object({
- type: z17.literal("message"),
- id: z17.string()
+ var responseOutputItemDoneSchema = z18.object({
+ type: z18.literal("response.output_item.done"),
+ output_index: z18.number(),
+ item: z18.discriminatedUnion("type", [
+ z18.object({
+ type: z18.literal("message"),
+ id: z18.string()
  }),
- z17.object({
- type: z17.literal("reasoning"),
- id: z17.string(),
- encrypted_content: z17.string().nullish()
+ z18.object({
+ type: z18.literal("reasoning"),
+ id: z18.string(),
+ encrypted_content: z18.string().nullish()
  }),
- z17.object({
- type: z17.literal("function_call"),
- id: z17.string(),
- call_id: z17.string(),
- name: z17.string(),
- arguments: z17.string(),
- status: z17.literal("completed")
+ z18.object({
+ type: z18.literal("function_call"),
+ id: z18.string(),
+ call_id: z18.string(),
+ name: z18.string(),
+ arguments: z18.string(),
+ status: z18.literal("completed")
  }),
  codeInterpreterCallItem,
+ imageGenerationCallItem,
  webSearchCallItem,
- z17.object({
- type: z17.literal("computer_call"),
- id: z17.string(),
- status: z17.literal("completed")
+ z18.object({
+ type: z18.literal("computer_call"),
+ id: z18.string(),
+ status: z18.literal("completed")
  }),
- z17.object({
- type: z17.literal("file_search_call"),
- id: z17.string(),
- status: z17.literal("completed"),
- queries: z17.array(z17.string()).nullish(),
- results: z17.array(
- z17.object({
- attributes: z17.object({
- file_id: z17.string(),
- filename: z17.string(),
- score: z17.number(),
- text: z17.string()
+ z18.object({
+ type: z18.literal("file_search_call"),
+ id: z18.string(),
+ status: z18.literal("completed"),
+ queries: z18.array(z18.string()).nullish(),
+ results: z18.array(
+ z18.object({
+ attributes: z18.object({
+ file_id: z18.string(),
+ filename: z18.string(),
+ score: z18.number(),
+ text: z18.string()
  })
  })
  ).nullish()
  })
  ])
  });
- var responseFunctionCallArgumentsDeltaSchema = z17.object({
- type: z17.literal("response.function_call_arguments.delta"),
- item_id: z17.string(),
- output_index: z17.number(),
- delta: z17.string()
+ var responseFunctionCallArgumentsDeltaSchema = z18.object({
+ type: z18.literal("response.function_call_arguments.delta"),
+ item_id: z18.string(),
+ output_index: z18.number(),
+ delta: z18.string()
  });
- var responseAnnotationAddedSchema = z17.object({
- type: z17.literal("response.output_text.annotation.added"),
- annotation: z17.discriminatedUnion("type", [
- z17.object({
- type: z17.literal("url_citation"),
- url: z17.string(),
- title: z17.string()
+ var responseAnnotationAddedSchema = z18.object({
+ type: z18.literal("response.output_text.annotation.added"),
+ annotation: z18.discriminatedUnion("type", [
+ z18.object({
+ type: z18.literal("url_citation"),
+ url: z18.string(),
+ title: z18.string()
  }),
- z17.object({
- type: z17.literal("file_citation"),
- file_id: z17.string(),
- filename: z17.string().nullish(),
- index: z17.number().nullish(),
- start_index: z17.number().nullish(),
- end_index: z17.number().nullish(),
- quote: z17.string().nullish()
+ z18.object({
+ type: z18.literal("file_citation"),
+ file_id: z18.string(),
+ filename: z18.string().nullish(),
+ index: z18.number().nullish(),
+ start_index: z18.number().nullish(),
+ end_index: z18.number().nullish(),
+ quote: z18.string().nullish()
  })
  ])
  });
- var responseReasoningSummaryPartAddedSchema = z17.object({
- type: z17.literal("response.reasoning_summary_part.added"),
- item_id: z17.string(),
- summary_index: z17.number()
+ var responseReasoningSummaryPartAddedSchema = z18.object({
+ type: z18.literal("response.reasoning_summary_part.added"),
+ item_id: z18.string(),
+ summary_index: z18.number()
  });
- var responseReasoningSummaryTextDeltaSchema = z17.object({
- type: z17.literal("response.reasoning_summary_text.delta"),
- item_id: z17.string(),
- summary_index: z17.number(),
- delta: z17.string()
+ var responseReasoningSummaryTextDeltaSchema = z18.object({
+ type: z18.literal("response.reasoning_summary_text.delta"),
+ item_id: z18.string(),
+ summary_index: z18.number(),
+ delta: z18.string()
  });
- var openaiResponsesChunkSchema = z17.union([
+ var openaiResponsesChunkSchema = z18.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
@@ -3650,7 +3747,7 @@ var openaiResponsesChunkSchema = z17.union([
  responseReasoningSummaryPartAddedSchema,
  responseReasoningSummaryTextDeltaSchema,
  errorChunkSchema,
- z17.object({ type: z17.string() }).loose()
+ z18.object({ type: z18.string() }).loose()
  // fallback for unknown chunks
  ]);
  function isTextDeltaChunk(chunk) {
@@ -3723,27 +3820,15 @@ function getResponsesModelConfig(modelId) {
  isReasoningModel: false
  };
  }
- var openaiResponsesProviderOptionsSchema = z17.object({
- metadata: z17.any().nullish(),
- parallelToolCalls: z17.boolean().nullish(),
- previousResponseId: z17.string().nullish(),
- store: z17.boolean().nullish(),
- user: z17.string().nullish(),
- reasoningEffort: z17.string().nullish(),
- strictJsonSchema: z17.boolean().nullish(),
- instructions: z17.string().nullish(),
- reasoningSummary: z17.string().nullish(),
- serviceTier: z17.enum(["auto", "flex", "priority"]).nullish(),
- include: z17.array(
- z17.enum([
+ var openaiResponsesProviderOptionsSchema = z18.object({
+ include: z18.array(
+ z18.enum([
  "reasoning.encrypted_content",
  "file_search_call.results",
  "message.output_text.logprobs"
  ])
  ).nullish(),
- textVerbosity: z17.enum(["low", "medium", "high"]).nullish(),
- promptCacheKey: z17.string().nullish(),
- safetyIdentifier: z17.string().nullish(),
+ instructions: z18.string().nullish(),
  /**
  * Return the log probabilities of the tokens.
  *
@@ -3756,7 +3841,25 @@ var openaiResponsesProviderOptionsSchema = z17.object({
  * @see https://platform.openai.com/docs/api-reference/responses/create
  * @see https://cookbook.openai.com/examples/using_logprobs
  */
- logprobs: z17.union([z17.boolean(), z17.number().min(1).max(TOP_LOGPROBS_MAX)]).optional()
+ logprobs: z18.union([z18.boolean(), z18.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
+ /**
+ * The maximum number of total calls to built-in tools that can be processed in a response.
+ * This maximum number applies across all built-in tool calls, not per individual tool.
+ * Any further attempts to call a tool by the model will be ignored.
+ */
+ maxToolCalls: z18.number().nullish(),
+ metadata: z18.any().nullish(),
+ parallelToolCalls: z18.boolean().nullish(),
+ previousResponseId: z18.string().nullish(),
+ promptCacheKey: z18.string().nullish(),
+ reasoningEffort: z18.string().nullish(),
+ reasoningSummary: z18.string().nullish(),
+ safetyIdentifier: z18.string().nullish(),
+ serviceTier: z18.enum(["auto", "flex", "priority"]).nullish(),
+ store: z18.boolean().nullish(),
+ strictJsonSchema: z18.boolean().nullish(),
+ textVerbosity: z18.enum(["low", "medium", "high"]).nullish(),
+ user: z18.string().nullish()
  });
  export {
  OpenAIChatLanguageModel,
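
Usage sketch: the reordered provider options schema also introduces maxToolCalls, which is passed through as the Responses API max_tool_calls parameter (see the baseArgs change above) and caps the total number of built-in tool calls in a single response. Assuming the usual ai-package entry point and the web search preview tool factory:

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// Sketch: caps built-in tool invocations for this response at 2; further tool
// call attempts by the model are ignored by the API.
const result = await generateText({
  model: openai.responses("gpt-4.1-mini"),
  prompt: "Summarize today's top AI SDK news.",
  tools: { web_search_preview: openai.tools.webSearchPreview({}) },
  providerOptions: { openai: { maxToolCalls: 2 } },
});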