@ai-sdk/openai 2.0.25 → 2.0.26

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
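The notable changes in this version: a new provider-defined web_search tool (id "openai.web_search") is added in src/tool/web-search.ts and exported from openaiTools next to webSearchPreview, the Responses tool preparation and streaming paths now emit "web_search" tool calls, and the finish-reason mapping keys off hasFunctionCall so that provider-executed calls (web search, file search, computer use) no longer force a "tool-calls" finish reason. A minimal usage sketch, assuming the factory is exposed as openai.tools.webSearch (as the src/openai-tools.ts change suggests); the model id, prompt, and domain filter are illustrative:

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

async function main() {
  // Hypothetical call using the new provider-executed web_search tool.
  // Option names mirror webSearchArgsSchema in this diff:
  // filters.allowedDomains, searchContextSize, userLocation.
  const result = await generateText({
    model: openai.responses("gpt-4o-mini"), // illustrative model id
    prompt: "Summarize this week's TypeScript release notes.",
    tools: {
      web_search: openai.tools.webSearch({
        searchContextSize: "medium",
        filters: { allowedDomains: ["devblogs.microsoft.com"] },
        userLocation: { type: "approximate", country: "US" },
      }),
    },
  });
  console.log(result.text);
}

main().catch(console.error);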
package/dist/index.mjs CHANGED
@@ -1835,11 +1835,53 @@ var codeInterpreter = createProviderDefinedToolFactory3({
  inputSchema: z11.object({})
  });

+ // src/tool/web-search.ts
+ import { createProviderDefinedToolFactory as createProviderDefinedToolFactory4 } from "@ai-sdk/provider-utils";
+ import { z as z12 } from "zod/v4";
+ var webSearchArgsSchema = z12.object({
+ filters: z12.object({
+ allowedDomains: z12.array(z12.string()).optional()
+ }).optional(),
+ searchContextSize: z12.enum(["low", "medium", "high"]).optional(),
+ userLocation: z12.object({
+ type: z12.literal("approximate"),
+ country: z12.string().optional(),
+ city: z12.string().optional(),
+ region: z12.string().optional(),
+ timezone: z12.string().optional()
+ }).optional()
+ });
+ var factory = createProviderDefinedToolFactory4({
+ id: "openai.web_search",
+ name: "web_search",
+ inputSchema: z12.object({
+ action: z12.discriminatedUnion("type", [
+ z12.object({
+ type: z12.literal("search"),
+ query: z12.string().nullish()
+ }),
+ z12.object({
+ type: z12.literal("open_page"),
+ url: z12.string()
+ }),
+ z12.object({
+ type: z12.literal("find"),
+ url: z12.string(),
+ pattern: z12.string()
+ })
+ ]).nullish()
+ })
+ });
+ var webSearch = (args = {}) => {
+ return factory(args);
+ };
+
  // src/openai-tools.ts
  var openaiTools = {
  codeInterpreter,
  fileSearch,
- webSearchPreview
+ webSearchPreview,
+ webSearch
  };

  // src/responses/openai-responses-language-model.ts
@@ -1854,14 +1896,14 @@ import {
  parseProviderOptions as parseProviderOptions5,
  postJsonToApi as postJsonToApi5
  } from "@ai-sdk/provider-utils";
- import { z as z13 } from "zod/v4";
+ import { z as z14 } from "zod/v4";

  // src/responses/convert-to-openai-responses-messages.ts
  import {
  UnsupportedFunctionalityError as UnsupportedFunctionalityError4
  } from "@ai-sdk/provider";
  import { parseProviderOptions as parseProviderOptions4 } from "@ai-sdk/provider-utils";
- import { z as z12 } from "zod/v4";
+ import { z as z13 } from "zod/v4";
  import { convertToBase64 as convertToBase642 } from "@ai-sdk/provider-utils";
  function isFileId(data, prefixes) {
  if (!prefixes) return false;
@@ -2051,26 +2093,26 @@ async function convertToOpenAIResponsesMessages({
  }
  return { messages, warnings };
  }
- var openaiResponsesReasoningProviderOptionsSchema = z12.object({
- itemId: z12.string().nullish(),
- reasoningEncryptedContent: z12.string().nullish()
+ var openaiResponsesReasoningProviderOptionsSchema = z13.object({
+ itemId: z13.string().nullish(),
+ reasoningEncryptedContent: z13.string().nullish()
  });

  // src/responses/map-openai-responses-finish-reason.ts
  function mapOpenAIResponseFinishReason({
  finishReason,
- hasToolCalls
+ hasFunctionCall
  }) {
  switch (finishReason) {
  case void 0:
  case null:
- return hasToolCalls ? "tool-calls" : "stop";
+ return hasFunctionCall ? "tool-calls" : "stop";
  case "max_output_tokens":
  return "length";
  case "content_filter":
  return "content-filter";
  default:
- return hasToolCalls ? "tool-calls" : "unknown";
+ return hasFunctionCall ? "tool-calls" : "unknown";
  }
  }

@@ -2122,6 +2164,16 @@ function prepareResponsesTools({
  });
  break;
  }
+ case "openai.web_search": {
+ const args = webSearchArgsSchema.parse(tool.args);
+ openaiTools2.push({
+ type: "web_search",
+ filters: args.filters != null ? { allowed_domains: args.filters.allowedDomains } : void 0,
+ search_context_size: args.searchContextSize,
+ user_location: args.userLocation
+ });
+ break;
+ }
  case "openai.code_interpreter": {
  const args = codeInterpreterArgsSchema.parse(tool.args);
  openaiTools2.push({
@@ -2154,7 +2206,7 @@ function prepareResponsesTools({
  case "tool":
  return {
  tools: openaiTools2,
- toolChoice: toolChoice.toolName === "code_interpreter" || toolChoice.toolName === "file_search" || toolChoice.toolName === "web_search_preview" ? { type: toolChoice.toolName } : { type: "function", name: toolChoice.toolName },
+ toolChoice: toolChoice.toolName === "code_interpreter" || toolChoice.toolName === "file_search" || toolChoice.toolName === "web_search_preview" || toolChoice.toolName === "web_search" ? { type: toolChoice.toolName } : { type: "function", name: toolChoice.toolName },
  toolWarnings
  };
  default: {
@@ -2167,35 +2219,35 @@ function prepareResponsesTools({
  }

  // src/responses/openai-responses-language-model.ts
- var webSearchCallItem = z13.object({
- type: z13.literal("web_search_call"),
- id: z13.string(),
- status: z13.string(),
- action: z13.discriminatedUnion("type", [
- z13.object({
- type: z13.literal("search"),
- query: z13.string().nullish()
+ var webSearchCallItem = z14.object({
+ type: z14.literal("web_search_call"),
+ id: z14.string(),
+ status: z14.string(),
+ action: z14.discriminatedUnion("type", [
+ z14.object({
+ type: z14.literal("search"),
+ query: z14.string().nullish()
  }),
- z13.object({
- type: z13.literal("open_page"),
- url: z13.string()
+ z14.object({
+ type: z14.literal("open_page"),
+ url: z14.string()
  }),
- z13.object({
- type: z13.literal("find"),
- url: z13.string(),
- pattern: z13.string()
+ z14.object({
+ type: z14.literal("find"),
+ url: z14.string(),
+ pattern: z14.string()
  })
  ]).nullish()
  });
  var TOP_LOGPROBS_MAX = 20;
- var LOGPROBS_SCHEMA = z13.array(
- z13.object({
- token: z13.string(),
- logprob: z13.number(),
- top_logprobs: z13.array(
- z13.object({
- token: z13.string(),
- logprob: z13.number()
+ var LOGPROBS_SCHEMA = z14.array(
+ z14.object({
+ token: z14.string(),
+ logprob: z14.number(),
+ top_logprobs: z14.array(
+ z14.object({
+ token: z14.string(),
+ logprob: z14.number()
  })
  )
  })
@@ -2399,92 +2451,92 @@ var OpenAIResponsesLanguageModel = class {
  body,
  failedResponseHandler: openaiFailedResponseHandler,
  successfulResponseHandler: createJsonResponseHandler5(
- z13.object({
- id: z13.string(),
- created_at: z13.number(),
- error: z13.object({
- code: z13.string(),
- message: z13.string()
+ z14.object({
+ id: z14.string(),
+ created_at: z14.number(),
+ error: z14.object({
+ code: z14.string(),
+ message: z14.string()
  }).nullish(),
- model: z13.string(),
- output: z13.array(
- z13.discriminatedUnion("type", [
- z13.object({
- type: z13.literal("message"),
- role: z13.literal("assistant"),
- id: z13.string(),
- content: z13.array(
- z13.object({
- type: z13.literal("output_text"),
- text: z13.string(),
+ model: z14.string(),
+ output: z14.array(
+ z14.discriminatedUnion("type", [
+ z14.object({
+ type: z14.literal("message"),
+ role: z14.literal("assistant"),
+ id: z14.string(),
+ content: z14.array(
+ z14.object({
+ type: z14.literal("output_text"),
+ text: z14.string(),
  logprobs: LOGPROBS_SCHEMA.nullish(),
- annotations: z13.array(
- z13.discriminatedUnion("type", [
- z13.object({
- type: z13.literal("url_citation"),
- start_index: z13.number(),
- end_index: z13.number(),
- url: z13.string(),
- title: z13.string()
+ annotations: z14.array(
+ z14.discriminatedUnion("type", [
+ z14.object({
+ type: z14.literal("url_citation"),
+ start_index: z14.number(),
+ end_index: z14.number(),
+ url: z14.string(),
+ title: z14.string()
  }),
- z13.object({
- type: z13.literal("file_citation"),
- file_id: z13.string(),
- filename: z13.string().nullish(),
- index: z13.number().nullish(),
- start_index: z13.number().nullish(),
- end_index: z13.number().nullish(),
- quote: z13.string().nullish()
+ z14.object({
+ type: z14.literal("file_citation"),
+ file_id: z14.string(),
+ filename: z14.string().nullish(),
+ index: z14.number().nullish(),
+ start_index: z14.number().nullish(),
+ end_index: z14.number().nullish(),
+ quote: z14.string().nullish()
  })
  ])
  )
  })
  )
  }),
- z13.object({
- type: z13.literal("function_call"),
- call_id: z13.string(),
- name: z13.string(),
- arguments: z13.string(),
- id: z13.string()
+ z14.object({
+ type: z14.literal("function_call"),
+ call_id: z14.string(),
+ name: z14.string(),
+ arguments: z14.string(),
+ id: z14.string()
  }),
  webSearchCallItem,
- z13.object({
- type: z13.literal("computer_call"),
- id: z13.string(),
- status: z13.string().optional()
+ z14.object({
+ type: z14.literal("computer_call"),
+ id: z14.string(),
+ status: z14.string().optional()
  }),
- z13.object({
- type: z13.literal("file_search_call"),
- id: z13.string(),
- status: z13.string().optional(),
- queries: z13.array(z13.string()).nullish(),
- results: z13.array(
- z13.object({
- attributes: z13.object({
- file_id: z13.string(),
- filename: z13.string(),
- score: z13.number(),
- text: z13.string()
+ z14.object({
+ type: z14.literal("file_search_call"),
+ id: z14.string(),
+ status: z14.string().optional(),
+ queries: z14.array(z14.string()).nullish(),
+ results: z14.array(
+ z14.object({
+ attributes: z14.object({
+ file_id: z14.string(),
+ filename: z14.string(),
+ score: z14.number(),
+ text: z14.string()
  })
  })
  ).nullish()
  }),
- z13.object({
- type: z13.literal("reasoning"),
- id: z13.string(),
- encrypted_content: z13.string().nullish(),
- summary: z13.array(
- z13.object({
- type: z13.literal("summary_text"),
- text: z13.string()
+ z14.object({
+ type: z14.literal("reasoning"),
+ id: z14.string(),
+ encrypted_content: z14.string().nullish(),
+ summary: z14.array(
+ z14.object({
+ type: z14.literal("summary_text"),
+ text: z14.string()
  })
  )
  })
  ])
  ),
- service_tier: z13.string().nullish(),
- incomplete_details: z13.object({ reason: z13.string() }).nullable(),
+ service_tier: z14.string().nullish(),
+ incomplete_details: z14.object({ reason: z14.string() }).nullable(),
  usage: usageSchema2
  })
  ),
@@ -2504,6 +2556,7 @@ var OpenAIResponsesLanguageModel = class {
  }
  const content = [];
  const logprobs = [];
+ let hasFunctionCall = false;
  for (const part of response.output) {
  switch (part.type) {
  case "reasoning": {
@@ -2562,6 +2615,7 @@ var OpenAIResponsesLanguageModel = class {
  break;
  }
  case "function_call": {
+ hasFunctionCall = true;
  content.push({
  type: "tool-call",
  toolCallId: part.call_id,
@@ -2649,7 +2703,7 @@ var OpenAIResponsesLanguageModel = class {
  content,
  finishReason: mapOpenAIResponseFinishReason({
  finishReason: (_m = response.incomplete_details) == null ? void 0 : _m.reason,
- hasToolCalls: content.some((part) => part.type === "tool-call")
+ hasFunctionCall
  }),
  usage: {
  inputTokens: response.usage.input_tokens,
@@ -2699,7 +2753,7 @@ var OpenAIResponsesLanguageModel = class {
  const logprobs = [];
  let responseId = null;
  const ongoingToolCalls = {};
- let hasToolCalls = false;
+ let hasFunctionCall = false;
  const activeReasoning = {};
  let serviceTier;
  return {
@@ -2789,7 +2843,7 @@ var OpenAIResponsesLanguageModel = class {
  } else if (isResponseOutputItemDoneChunk(value)) {
  if (value.item.type === "function_call") {
  ongoingToolCalls[value.output_index] = void 0;
- hasToolCalls = true;
+ hasFunctionCall = true;
  controller.enqueue({
  type: "tool-input-end",
  id: value.item.call_id
@@ -2807,7 +2861,6 @@ var OpenAIResponsesLanguageModel = class {
  });
  } else if (value.item.type === "web_search_call") {
  ongoingToolCalls[value.output_index] = void 0;
- hasToolCalls = true;
  controller.enqueue({
  type: "tool-input-end",
  id: value.item.id
@@ -2815,20 +2868,19 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({
  type: "tool-call",
  toolCallId: value.item.id,
- toolName: "web_search_preview",
+ toolName: "web_search",
  input: JSON.stringify({ action: value.item.action }),
  providerExecuted: true
  });
  controller.enqueue({
  type: "tool-result",
  toolCallId: value.item.id,
- toolName: "web_search_preview",
+ toolName: "web_search",
  result: { status: value.item.status },
  providerExecuted: true
  });
  } else if (value.item.type === "computer_call") {
  ongoingToolCalls[value.output_index] = void 0;
- hasToolCalls = true;
  controller.enqueue({
  type: "tool-input-end",
  id: value.item.id
@@ -2852,7 +2904,6 @@ var OpenAIResponsesLanguageModel = class {
  });
  } else if (value.item.type === "file_search_call") {
  ongoingToolCalls[value.output_index] = void 0;
- hasToolCalls = true;
  controller.enqueue({
  type: "tool-input-end",
  id: value.item.id
@@ -2953,7 +3004,7 @@ var OpenAIResponsesLanguageModel = class {
  } else if (isResponseFinishedChunk(value)) {
  finishReason = mapOpenAIResponseFinishReason({
  finishReason: (_h = value.response.incomplete_details) == null ? void 0 : _h.reason,
- hasToolCalls
+ hasFunctionCall
  });
  usage.inputTokens = value.response.usage.input_tokens;
  usage.outputTokens = value.response.usage.output_tokens;
@@ -3012,176 +3063,176 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  };
- var usageSchema2 = z13.object({
- input_tokens: z13.number(),
- input_tokens_details: z13.object({ cached_tokens: z13.number().nullish() }).nullish(),
- output_tokens: z13.number(),
- output_tokens_details: z13.object({ reasoning_tokens: z13.number().nullish() }).nullish()
+ var usageSchema2 = z14.object({
+ input_tokens: z14.number(),
+ input_tokens_details: z14.object({ cached_tokens: z14.number().nullish() }).nullish(),
+ output_tokens: z14.number(),
+ output_tokens_details: z14.object({ reasoning_tokens: z14.number().nullish() }).nullish()
  });
- var textDeltaChunkSchema = z13.object({
- type: z13.literal("response.output_text.delta"),
- item_id: z13.string(),
- delta: z13.string(),
+ var textDeltaChunkSchema = z14.object({
+ type: z14.literal("response.output_text.delta"),
+ item_id: z14.string(),
+ delta: z14.string(),
  logprobs: LOGPROBS_SCHEMA.nullish()
  });
- var errorChunkSchema = z13.object({
- type: z13.literal("error"),
- code: z13.string(),
- message: z13.string(),
- param: z13.string().nullish(),
- sequence_number: z13.number()
+ var errorChunkSchema = z14.object({
+ type: z14.literal("error"),
+ code: z14.string(),
+ message: z14.string(),
+ param: z14.string().nullish(),
+ sequence_number: z14.number()
  });
- var responseFinishedChunkSchema = z13.object({
- type: z13.enum(["response.completed", "response.incomplete"]),
- response: z13.object({
- incomplete_details: z13.object({ reason: z13.string() }).nullish(),
+ var responseFinishedChunkSchema = z14.object({
+ type: z14.enum(["response.completed", "response.incomplete"]),
+ response: z14.object({
+ incomplete_details: z14.object({ reason: z14.string() }).nullish(),
  usage: usageSchema2,
- service_tier: z13.string().nullish()
+ service_tier: z14.string().nullish()
  })
  });
- var responseCreatedChunkSchema = z13.object({
- type: z13.literal("response.created"),
- response: z13.object({
- id: z13.string(),
- created_at: z13.number(),
- model: z13.string(),
- service_tier: z13.string().nullish()
+ var responseCreatedChunkSchema = z14.object({
+ type: z14.literal("response.created"),
+ response: z14.object({
+ id: z14.string(),
+ created_at: z14.number(),
+ model: z14.string(),
+ service_tier: z14.string().nullish()
  })
  });
- var responseOutputItemAddedSchema = z13.object({
- type: z13.literal("response.output_item.added"),
- output_index: z13.number(),
- item: z13.discriminatedUnion("type", [
- z13.object({
- type: z13.literal("message"),
- id: z13.string()
+ var responseOutputItemAddedSchema = z14.object({
+ type: z14.literal("response.output_item.added"),
+ output_index: z14.number(),
+ item: z14.discriminatedUnion("type", [
+ z14.object({
+ type: z14.literal("message"),
+ id: z14.string()
  }),
- z13.object({
- type: z13.literal("reasoning"),
- id: z13.string(),
- encrypted_content: z13.string().nullish()
+ z14.object({
+ type: z14.literal("reasoning"),
+ id: z14.string(),
+ encrypted_content: z14.string().nullish()
  }),
- z13.object({
- type: z13.literal("function_call"),
- id: z13.string(),
- call_id: z13.string(),
- name: z13.string(),
- arguments: z13.string()
+ z14.object({
+ type: z14.literal("function_call"),
+ id: z14.string(),
+ call_id: z14.string(),
+ name: z14.string(),
+ arguments: z14.string()
  }),
- z13.object({
- type: z13.literal("web_search_call"),
- id: z13.string(),
- status: z13.string(),
- action: z13.object({
- type: z13.literal("search"),
- query: z13.string().optional()
+ z14.object({
+ type: z14.literal("web_search_call"),
+ id: z14.string(),
+ status: z14.string(),
+ action: z14.object({
+ type: z14.literal("search"),
+ query: z14.string().optional()
  }).nullish()
  }),
- z13.object({
- type: z13.literal("computer_call"),
- id: z13.string(),
- status: z13.string()
+ z14.object({
+ type: z14.literal("computer_call"),
+ id: z14.string(),
+ status: z14.string()
  }),
- z13.object({
- type: z13.literal("file_search_call"),
- id: z13.string(),
- status: z13.string(),
- queries: z13.array(z13.string()).nullish(),
- results: z13.array(
- z13.object({
- attributes: z13.object({
- file_id: z13.string(),
- filename: z13.string(),
- score: z13.number(),
- text: z13.string()
+ z14.object({
+ type: z14.literal("file_search_call"),
+ id: z14.string(),
+ status: z14.string(),
+ queries: z14.array(z14.string()).nullish(),
+ results: z14.array(
+ z14.object({
+ attributes: z14.object({
+ file_id: z14.string(),
+ filename: z14.string(),
+ score: z14.number(),
+ text: z14.string()
  })
  })
  ).optional()
  })
  ])
  });
- var responseOutputItemDoneSchema = z13.object({
- type: z13.literal("response.output_item.done"),
- output_index: z13.number(),
- item: z13.discriminatedUnion("type", [
- z13.object({
- type: z13.literal("message"),
- id: z13.string()
+ var responseOutputItemDoneSchema = z14.object({
+ type: z14.literal("response.output_item.done"),
+ output_index: z14.number(),
+ item: z14.discriminatedUnion("type", [
+ z14.object({
+ type: z14.literal("message"),
+ id: z14.string()
  }),
- z13.object({
- type: z13.literal("reasoning"),
- id: z13.string(),
- encrypted_content: z13.string().nullish()
+ z14.object({
+ type: z14.literal("reasoning"),
+ id: z14.string(),
+ encrypted_content: z14.string().nullish()
  }),
- z13.object({
- type: z13.literal("function_call"),
- id: z13.string(),
- call_id: z13.string(),
- name: z13.string(),
- arguments: z13.string(),
- status: z13.literal("completed")
+ z14.object({
+ type: z14.literal("function_call"),
+ id: z14.string(),
+ call_id: z14.string(),
+ name: z14.string(),
+ arguments: z14.string(),
+ status: z14.literal("completed")
  }),
  webSearchCallItem,
- z13.object({
- type: z13.literal("computer_call"),
- id: z13.string(),
- status: z13.literal("completed")
+ z14.object({
+ type: z14.literal("computer_call"),
+ id: z14.string(),
+ status: z14.literal("completed")
  }),
- z13.object({
- type: z13.literal("file_search_call"),
- id: z13.string(),
- status: z13.literal("completed"),
- queries: z13.array(z13.string()).nullish(),
- results: z13.array(
- z13.object({
- attributes: z13.object({
- file_id: z13.string(),
- filename: z13.string(),
- score: z13.number(),
- text: z13.string()
+ z14.object({
+ type: z14.literal("file_search_call"),
+ id: z14.string(),
+ status: z14.literal("completed"),
+ queries: z14.array(z14.string()).nullish(),
+ results: z14.array(
+ z14.object({
+ attributes: z14.object({
+ file_id: z14.string(),
+ filename: z14.string(),
+ score: z14.number(),
+ text: z14.string()
  })
  })
  ).nullish()
  })
  ])
  });
- var responseFunctionCallArgumentsDeltaSchema = z13.object({
- type: z13.literal("response.function_call_arguments.delta"),
- item_id: z13.string(),
- output_index: z13.number(),
- delta: z13.string()
+ var responseFunctionCallArgumentsDeltaSchema = z14.object({
+ type: z14.literal("response.function_call_arguments.delta"),
+ item_id: z14.string(),
+ output_index: z14.number(),
+ delta: z14.string()
  });
- var responseAnnotationAddedSchema = z13.object({
- type: z13.literal("response.output_text.annotation.added"),
- annotation: z13.discriminatedUnion("type", [
- z13.object({
- type: z13.literal("url_citation"),
- url: z13.string(),
- title: z13.string()
+ var responseAnnotationAddedSchema = z14.object({
+ type: z14.literal("response.output_text.annotation.added"),
+ annotation: z14.discriminatedUnion("type", [
+ z14.object({
+ type: z14.literal("url_citation"),
+ url: z14.string(),
+ title: z14.string()
  }),
- z13.object({
- type: z13.literal("file_citation"),
- file_id: z13.string(),
- filename: z13.string().nullish(),
- index: z13.number().nullish(),
- start_index: z13.number().nullish(),
- end_index: z13.number().nullish(),
- quote: z13.string().nullish()
+ z14.object({
+ type: z14.literal("file_citation"),
+ file_id: z14.string(),
+ filename: z14.string().nullish(),
+ index: z14.number().nullish(),
+ start_index: z14.number().nullish(),
+ end_index: z14.number().nullish(),
+ quote: z14.string().nullish()
  })
  ])
  });
- var responseReasoningSummaryPartAddedSchema = z13.object({
- type: z13.literal("response.reasoning_summary_part.added"),
- item_id: z13.string(),
- summary_index: z13.number()
+ var responseReasoningSummaryPartAddedSchema = z14.object({
+ type: z14.literal("response.reasoning_summary_part.added"),
+ item_id: z14.string(),
+ summary_index: z14.number()
  });
- var responseReasoningSummaryTextDeltaSchema = z13.object({
- type: z13.literal("response.reasoning_summary_text.delta"),
- item_id: z13.string(),
- summary_index: z13.number(),
- delta: z13.string()
+ var responseReasoningSummaryTextDeltaSchema = z14.object({
+ type: z14.literal("response.reasoning_summary_text.delta"),
+ item_id: z14.string(),
+ summary_index: z14.number(),
+ delta: z14.string()
  });
- var openaiResponsesChunkSchema = z13.union([
+ var openaiResponsesChunkSchema = z14.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
@@ -3192,7 +3243,7 @@ var openaiResponsesChunkSchema = z13.union([
  responseReasoningSummaryPartAddedSchema,
  responseReasoningSummaryTextDeltaSchema,
  errorChunkSchema,
- z13.object({ type: z13.string() }).loose()
+ z14.object({ type: z14.string() }).loose()
  // fallback for unknown chunks
  ]);
  function isTextDeltaChunk(chunk) {
@@ -3265,27 +3316,27 @@ function getResponsesModelConfig(modelId) {
  isReasoningModel: false
  };
  }
- var openaiResponsesProviderOptionsSchema = z13.object({
- metadata: z13.any().nullish(),
- parallelToolCalls: z13.boolean().nullish(),
- previousResponseId: z13.string().nullish(),
- store: z13.boolean().nullish(),
- user: z13.string().nullish(),
- reasoningEffort: z13.string().nullish(),
- strictJsonSchema: z13.boolean().nullish(),
- instructions: z13.string().nullish(),
- reasoningSummary: z13.string().nullish(),
- serviceTier: z13.enum(["auto", "flex", "priority"]).nullish(),
- include: z13.array(
- z13.enum([
+ var openaiResponsesProviderOptionsSchema = z14.object({
+ metadata: z14.any().nullish(),
+ parallelToolCalls: z14.boolean().nullish(),
+ previousResponseId: z14.string().nullish(),
+ store: z14.boolean().nullish(),
+ user: z14.string().nullish(),
+ reasoningEffort: z14.string().nullish(),
+ strictJsonSchema: z14.boolean().nullish(),
+ instructions: z14.string().nullish(),
+ reasoningSummary: z14.string().nullish(),
+ serviceTier: z14.enum(["auto", "flex", "priority"]).nullish(),
+ include: z14.array(
+ z14.enum([
  "reasoning.encrypted_content",
  "file_search_call.results",
  "message.output_text.logprobs"
  ])
  ).nullish(),
- textVerbosity: z13.enum(["low", "medium", "high"]).nullish(),
- promptCacheKey: z13.string().nullish(),
- safetyIdentifier: z13.string().nullish(),
+ textVerbosity: z14.enum(["low", "medium", "high"]).nullish(),
+ promptCacheKey: z14.string().nullish(),
+ safetyIdentifier: z14.string().nullish(),
  /**
  * Return the log probabilities of the tokens.
  *
@@ -3298,7 +3349,7 @@ var openaiResponsesProviderOptionsSchema = z13.object({
  * @see https://platform.openai.com/docs/api-reference/responses/create
  * @see https://cookbook.openai.com/examples/using_logprobs
  */
- logprobs: z13.union([z13.boolean(), z13.number().min(1).max(TOP_LOGPROBS_MAX)]).optional()
+ logprobs: z14.union([z14.boolean(), z14.number().min(1).max(TOP_LOGPROBS_MAX)]).optional()
  });

  // src/speech/openai-speech-model.ts
@@ -3308,10 +3359,10 @@ import {
  parseProviderOptions as parseProviderOptions6,
  postJsonToApi as postJsonToApi6
  } from "@ai-sdk/provider-utils";
- import { z as z14 } from "zod/v4";
- var OpenAIProviderOptionsSchema = z14.object({
- instructions: z14.string().nullish(),
- speed: z14.number().min(0.25).max(4).default(1).nullish()
+ import { z as z15 } from "zod/v4";
+ var OpenAIProviderOptionsSchema = z15.object({
+ instructions: z15.string().nullish(),
+ speed: z15.number().min(0.25).max(4).default(1).nullish()
  });
  var OpenAISpeechModel = class {
  constructor(modelId, config) {
@@ -3422,33 +3473,33 @@ import {
  parseProviderOptions as parseProviderOptions7,
  postFormDataToApi
  } from "@ai-sdk/provider-utils";
- import { z as z16 } from "zod/v4";
+ import { z as z17 } from "zod/v4";

  // src/transcription/openai-transcription-options.ts
- import { z as z15 } from "zod/v4";
- var openAITranscriptionProviderOptions = z15.object({
+ import { z as z16 } from "zod/v4";
+ var openAITranscriptionProviderOptions = z16.object({
  /**
  * Additional information to include in the transcription response.
  */
- include: z15.array(z15.string()).optional(),
+ include: z16.array(z16.string()).optional(),
  /**
  * The language of the input audio in ISO-639-1 format.
  */
- language: z15.string().optional(),
+ language: z16.string().optional(),
  /**
  * An optional text to guide the model's style or continue a previous audio segment.
  */
- prompt: z15.string().optional(),
+ prompt: z16.string().optional(),
  /**
  * The sampling temperature, between 0 and 1.
  * @default 0
  */
- temperature: z15.number().min(0).max(1).default(0).optional(),
+ temperature: z16.number().min(0).max(1).default(0).optional(),
  /**
  * The timestamp granularities to populate for this transcription.
  * @default ['segment']
  */
- timestampGranularities: z15.array(z15.enum(["word", "segment"])).default(["segment"]).optional()
+ timestampGranularities: z16.array(z16.enum(["word", "segment"])).default(["segment"]).optional()
  });

  // src/transcription/openai-transcription-model.ts
@@ -3617,29 +3668,29 @@ var OpenAITranscriptionModel = class {
  };
  }
  };
- var openaiTranscriptionResponseSchema = z16.object({
- text: z16.string(),
- language: z16.string().nullish(),
- duration: z16.number().nullish(),
- words: z16.array(
- z16.object({
- word: z16.string(),
- start: z16.number(),
- end: z16.number()
+ var openaiTranscriptionResponseSchema = z17.object({
+ text: z17.string(),
+ language: z17.string().nullish(),
+ duration: z17.number().nullish(),
+ words: z17.array(
+ z17.object({
+ word: z17.string(),
+ start: z17.number(),
+ end: z17.number()
  })
  ).nullish(),
- segments: z16.array(
- z16.object({
- id: z16.number(),
- seek: z16.number(),
- start: z16.number(),
- end: z16.number(),
- text: z16.string(),
- tokens: z16.array(z16.number()),
- temperature: z16.number(),
- avg_logprob: z16.number(),
- compression_ratio: z16.number(),
- no_speech_prob: z16.number()
+ segments: z17.array(
+ z17.object({
+ id: z17.number(),
+ seek: z17.number(),
+ start: z17.number(),
+ end: z17.number(),
+ text: z17.string(),
+ tokens: z17.array(z17.number()),
+ temperature: z17.number(),
+ avg_logprob: z17.number(),
+ compression_ratio: z17.number(),
+ no_speech_prob: z17.number()
  })
  ).nullish()
  });