@ai-sdk/openai 2.0.25 → 2.0.27

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -359,23 +359,11 @@ var compoundFilterSchema = z3.object({
 });
 var filtersSchema = z3.union([comparisonFilterSchema, compoundFilterSchema]);
 var fileSearchArgsSchema = z3.object({
-  /**
-   * List of vector store IDs to search through. If not provided, searches all available vector stores.
-   */
   vectorStoreIds: z3.array(z3.string()).optional(),
-  /**
-   * Maximum number of search results to return. Defaults to 10.
-   */
  maxNumResults: z3.number().optional(),
-  /**
-   * Ranking options for the search.
-   */
  ranking: z3.object({
    ranker: z3.enum(["auto", "default-2024-08-21"]).optional()
  }).optional(),
-  /**
-   * A filter to apply based on file attributes.
-   */
  filters: filtersSchema.optional()
 });
 var fileSearch = createProviderDefinedToolFactory({
@@ -1835,11 +1823,95 @@ var codeInterpreter = createProviderDefinedToolFactory3({
   inputSchema: z11.object({})
 });

+// src/tool/web-search.ts
+import { createProviderDefinedToolFactory as createProviderDefinedToolFactory4 } from "@ai-sdk/provider-utils";
+import { z as z12 } from "zod/v4";
+var webSearchArgsSchema = z12.object({
+  filters: z12.object({
+    allowedDomains: z12.array(z12.string()).optional()
+  }).optional(),
+  searchContextSize: z12.enum(["low", "medium", "high"]).optional(),
+  userLocation: z12.object({
+    type: z12.literal("approximate"),
+    country: z12.string().optional(),
+    city: z12.string().optional(),
+    region: z12.string().optional(),
+    timezone: z12.string().optional()
+  }).optional()
+});
+var factory = createProviderDefinedToolFactory4({
+  id: "openai.web_search",
+  name: "web_search",
+  inputSchema: z12.object({
+    action: z12.discriminatedUnion("type", [
+      z12.object({
+        type: z12.literal("search"),
+        query: z12.string().nullish()
+      }),
+      z12.object({
+        type: z12.literal("open_page"),
+        url: z12.string()
+      }),
+      z12.object({
+        type: z12.literal("find"),
+        url: z12.string(),
+        pattern: z12.string()
+      })
+    ]).nullish()
+  })
+});
+var webSearch = (args = {}) => {
+  return factory(args);
+};
+
 // src/openai-tools.ts
 var openaiTools = {
+  /**
+   * The Code Interpreter tool allows models to write and run Python code in a
+   * sandboxed environment to solve complex problems in domains like data analysis,
+   * coding, and math.
+   *
+   * @param container - The container to use for the code interpreter.
+   *
+   * Must have name `code_interpreter`.
+   */
   codeInterpreter,
+  /**
+   * File search is a tool available in the Responses API. It enables models to
+   * retrieve information in a knowledge base of previously uploaded files through
+   * semantic and keyword search.
+   *
+   * Must have name `file_search`.
+   *
+   * @param vectorStoreIds - The vector store IDs to use for the file search.
+   * @param maxNumResults - The maximum number of results to return.
+   * @param ranking - The ranking options to use for the file search.
+   * @param filters - The filters to use for the file search.
+   */
   fileSearch,
-  webSearchPreview
+  /**
+   * Web search allows models to access up-to-date information from the internet
+   * and provide answers with sourced citations.
+   *
+   * Must have name `web_search_preview`.
+   *
+   * @param searchContextSize - The search context size to use for the web search.
+   * @param userLocation - The user location to use for the web search.
+   *
+   * @deprecated Use `webSearch` instead.
+   */
+  webSearchPreview,
+  /**
+   * Web search allows models to access up-to-date information from the internet
+   * and provide answers with sourced citations.
+   *
+   * Must have name `web_search`.
+   *
+   * @param filters - The filters to use for the web search.
+   * @param searchContextSize - The search context size to use for the web search.
+   * @param userLocation - The user location to use for the web search.
+   */
+  webSearch
 };

 // src/responses/openai-responses-language-model.ts
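For context, a minimal usage sketch of the new web_search tool defined in src/tool/web-search.ts above, assuming the AI SDK v5 generateText API as the caller; the model id, prompt, and option values are illustrative and not taken from this package:

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// Hypothetical usage: the tool must be registered under the name "web_search",
// and its options mirror webSearchArgsSchema above (filters.allowedDomains,
// searchContextSize, userLocation).
const result = await generateText({
  model: openai.responses("gpt-4.1-mini"), // illustrative model id
  prompt: "Summarize today's top web development news.",
  tools: {
    web_search: openai.tools.webSearch({
      searchContextSize: "medium",
      filters: { allowedDomains: ["vercel.com"] },
      userLocation: { type: "approximate", country: "US" }
    })
  }
});
console.log(result.text);

The older webSearchPreview factory remains exported but is marked @deprecated in favor of webSearch.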
@@ -1854,14 +1926,14 @@ import {
   parseProviderOptions as parseProviderOptions5,
   postJsonToApi as postJsonToApi5
 } from "@ai-sdk/provider-utils";
-import { z as z13 } from "zod/v4";
+import { z as z14 } from "zod/v4";

 // src/responses/convert-to-openai-responses-messages.ts
 import {
   UnsupportedFunctionalityError as UnsupportedFunctionalityError4
 } from "@ai-sdk/provider";
 import { parseProviderOptions as parseProviderOptions4 } from "@ai-sdk/provider-utils";
-import { z as z12 } from "zod/v4";
+import { z as z13 } from "zod/v4";
 import { convertToBase64 as convertToBase642 } from "@ai-sdk/provider-utils";
 function isFileId(data, prefixes) {
   if (!prefixes) return false;
@@ -2051,26 +2123,26 @@ async function convertToOpenAIResponsesMessages({
   }
   return { messages, warnings };
 }
-var openaiResponsesReasoningProviderOptionsSchema = z12.object({
-  itemId: z12.string().nullish(),
-  reasoningEncryptedContent: z12.string().nullish()
+var openaiResponsesReasoningProviderOptionsSchema = z13.object({
+  itemId: z13.string().nullish(),
+  reasoningEncryptedContent: z13.string().nullish()
 });

 // src/responses/map-openai-responses-finish-reason.ts
 function mapOpenAIResponseFinishReason({
   finishReason,
-  hasToolCalls
+  hasFunctionCall
 }) {
   switch (finishReason) {
     case void 0:
     case null:
-      return hasToolCalls ? "tool-calls" : "stop";
+      return hasFunctionCall ? "tool-calls" : "stop";
     case "max_output_tokens":
       return "length";
     case "content_filter":
       return "content-filter";
     default:
-      return hasToolCalls ? "tool-calls" : "unknown";
+      return hasFunctionCall ? "tool-calls" : "unknown";
   }
 }

@@ -2122,6 +2194,16 @@ function prepareResponsesTools({
        });
        break;
      }
+      case "openai.web_search": {
+        const args = webSearchArgsSchema.parse(tool.args);
+        openaiTools2.push({
+          type: "web_search",
+          filters: args.filters != null ? { allowed_domains: args.filters.allowedDomains } : void 0,
+          search_context_size: args.searchContextSize,
+          user_location: args.userLocation
+        });
+        break;
+      }
      case "openai.code_interpreter": {
        const args = codeInterpreterArgsSchema.parse(tool.args);
        openaiTools2.push({
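The new "openai.web_search" branch above translates the camelCase tool options into the snake_case tool entry sent to the Responses API. A hedged illustration of that mapping, with invented values:

// For tool args { filters: { allowedDomains: ["example.com"] }, searchContextSize: "high" },
// the branch above pushes roughly this entry onto the request's tools array:
const webSearchToolEntry = {
  type: "web_search",
  filters: { allowed_domains: ["example.com"] },
  search_context_size: "high",
  user_location: undefined // omitted when no userLocation is provided
};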
@@ -2154,7 +2236,7 @@ function prepareResponsesTools({
    case "tool":
      return {
        tools: openaiTools2,
-        toolChoice: toolChoice.toolName === "code_interpreter" || toolChoice.toolName === "file_search" || toolChoice.toolName === "web_search_preview" ? { type: toolChoice.toolName } : { type: "function", name: toolChoice.toolName },
+        toolChoice: toolChoice.toolName === "code_interpreter" || toolChoice.toolName === "file_search" || toolChoice.toolName === "web_search_preview" || toolChoice.toolName === "web_search" ? { type: toolChoice.toolName } : { type: "function", name: toolChoice.toolName },
        toolWarnings
      };
    default: {
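Because the toolChoice mapping above now also treats "web_search" as a built-in tool type, the new tool can be forced. A sketch assuming the same AI SDK generateText call shape as before (model id and prompt illustrative):

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// Forcing the provider-executed web search tool; per the mapping above this
// should translate to tool_choice { type: "web_search" } in the request.
const forced = await generateText({
  model: openai.responses("gpt-4.1-mini"), // illustrative
  prompt: "Find the current Node.js LTS version.",
  tools: { web_search: openai.tools.webSearch({}) },
  toolChoice: { type: "tool", toolName: "web_search" }
});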
@@ -2167,35 +2249,35 @@ function prepareResponsesTools({
 }

 // src/responses/openai-responses-language-model.ts
-var webSearchCallItem = z13.object({
-  type: z13.literal("web_search_call"),
-  id: z13.string(),
-  status: z13.string(),
-  action: z13.discriminatedUnion("type", [
-    z13.object({
-      type: z13.literal("search"),
-      query: z13.string().nullish()
+var webSearchCallItem = z14.object({
+  type: z14.literal("web_search_call"),
+  id: z14.string(),
+  status: z14.string(),
+  action: z14.discriminatedUnion("type", [
+    z14.object({
+      type: z14.literal("search"),
+      query: z14.string().nullish()
    }),
-    z13.object({
-      type: z13.literal("open_page"),
-      url: z13.string()
+    z14.object({
+      type: z14.literal("open_page"),
+      url: z14.string()
    }),
-    z13.object({
-      type: z13.literal("find"),
-      url: z13.string(),
-      pattern: z13.string()
+    z14.object({
+      type: z14.literal("find"),
+      url: z14.string(),
+      pattern: z14.string()
    })
  ]).nullish()
 });
 var TOP_LOGPROBS_MAX = 20;
-var LOGPROBS_SCHEMA = z13.array(
-  z13.object({
-    token: z13.string(),
-    logprob: z13.number(),
-    top_logprobs: z13.array(
-      z13.object({
-        token: z13.string(),
-        logprob: z13.number()
+var LOGPROBS_SCHEMA = z14.array(
+  z14.object({
+    token: z14.string(),
+    logprob: z14.number(),
+    top_logprobs: z14.array(
+      z14.object({
+        token: z14.string(),
+        logprob: z14.number()
      })
    )
  })
 )
@@ -2399,92 +2481,92 @@ var OpenAIResponsesLanguageModel = class {
       body,
       failedResponseHandler: openaiFailedResponseHandler,
       successfulResponseHandler: createJsonResponseHandler5(
-        z13.object({
-          id: z13.string(),
-          created_at: z13.number(),
-          error: z13.object({
-            code: z13.string(),
-            message: z13.string()
+        z14.object({
+          id: z14.string(),
+          created_at: z14.number(),
+          error: z14.object({
+            code: z14.string(),
+            message: z14.string()
          }).nullish(),
-          model: z13.string(),
-          output: z13.array(
-            z13.discriminatedUnion("type", [
-              z13.object({
-                type: z13.literal("message"),
-                role: z13.literal("assistant"),
-                id: z13.string(),
-                content: z13.array(
-                  z13.object({
-                    type: z13.literal("output_text"),
-                    text: z13.string(),
+          model: z14.string(),
+          output: z14.array(
+            z14.discriminatedUnion("type", [
+              z14.object({
+                type: z14.literal("message"),
+                role: z14.literal("assistant"),
+                id: z14.string(),
+                content: z14.array(
+                  z14.object({
+                    type: z14.literal("output_text"),
+                    text: z14.string(),
                    logprobs: LOGPROBS_SCHEMA.nullish(),
-                    annotations: z13.array(
-                      z13.discriminatedUnion("type", [
-                        z13.object({
-                          type: z13.literal("url_citation"),
-                          start_index: z13.number(),
-                          end_index: z13.number(),
-                          url: z13.string(),
-                          title: z13.string()
+                    annotations: z14.array(
+                      z14.discriminatedUnion("type", [
+                        z14.object({
+                          type: z14.literal("url_citation"),
+                          start_index: z14.number(),
+                          end_index: z14.number(),
+                          url: z14.string(),
+                          title: z14.string()
                        }),
-                        z13.object({
-                          type: z13.literal("file_citation"),
-                          file_id: z13.string(),
-                          filename: z13.string().nullish(),
-                          index: z13.number().nullish(),
-                          start_index: z13.number().nullish(),
-                          end_index: z13.number().nullish(),
-                          quote: z13.string().nullish()
+                        z14.object({
+                          type: z14.literal("file_citation"),
+                          file_id: z14.string(),
+                          filename: z14.string().nullish(),
+                          index: z14.number().nullish(),
+                          start_index: z14.number().nullish(),
+                          end_index: z14.number().nullish(),
+                          quote: z14.string().nullish()
                        })
                      ])
                    )
                  })
                )
              }),
-              z13.object({
-                type: z13.literal("function_call"),
-                call_id: z13.string(),
-                name: z13.string(),
-                arguments: z13.string(),
-                id: z13.string()
+              z14.object({
+                type: z14.literal("function_call"),
+                call_id: z14.string(),
+                name: z14.string(),
+                arguments: z14.string(),
+                id: z14.string()
              }),
              webSearchCallItem,
-              z13.object({
-                type: z13.literal("computer_call"),
-                id: z13.string(),
-                status: z13.string().optional()
+              z14.object({
+                type: z14.literal("computer_call"),
+                id: z14.string(),
+                status: z14.string().optional()
              }),
-              z13.object({
-                type: z13.literal("file_search_call"),
-                id: z13.string(),
-                status: z13.string().optional(),
-                queries: z13.array(z13.string()).nullish(),
-                results: z13.array(
-                  z13.object({
-                    attributes: z13.object({
-                      file_id: z13.string(),
-                      filename: z13.string(),
-                      score: z13.number(),
-                      text: z13.string()
+              z14.object({
+                type: z14.literal("file_search_call"),
+                id: z14.string(),
+                status: z14.string().optional(),
+                queries: z14.array(z14.string()).nullish(),
+                results: z14.array(
+                  z14.object({
+                    attributes: z14.object({
+                      file_id: z14.string(),
+                      filename: z14.string(),
+                      score: z14.number(),
+                      text: z14.string()
                    })
                  })
                ).nullish()
              }),
-              z13.object({
-                type: z13.literal("reasoning"),
-                id: z13.string(),
-                encrypted_content: z13.string().nullish(),
-                summary: z13.array(
-                  z13.object({
-                    type: z13.literal("summary_text"),
-                    text: z13.string()
+              z14.object({
+                type: z14.literal("reasoning"),
+                id: z14.string(),
+                encrypted_content: z14.string().nullish(),
+                summary: z14.array(
+                  z14.object({
+                    type: z14.literal("summary_text"),
+                    text: z14.string()
                  })
                )
              })
            ])
          ),
-          service_tier: z13.string().nullish(),
-          incomplete_details: z13.object({ reason: z13.string() }).nullable(),
+          service_tier: z14.string().nullish(),
+          incomplete_details: z14.object({ reason: z14.string() }).nullable(),
          usage: usageSchema2
        })
      ),
@@ -2504,6 +2586,7 @@ var OpenAIResponsesLanguageModel = class {
    }
    const content = [];
    const logprobs = [];
+    let hasFunctionCall = false;
    for (const part of response.output) {
      switch (part.type) {
        case "reasoning": {
@@ -2562,6 +2645,7 @@ var OpenAIResponsesLanguageModel = class {
          break;
        }
        case "function_call": {
+          hasFunctionCall = true;
          content.push({
            type: "tool-call",
            toolCallId: part.call_id,
@@ -2649,7 +2733,7 @@ var OpenAIResponsesLanguageModel = class {
      content,
      finishReason: mapOpenAIResponseFinishReason({
        finishReason: (_m = response.incomplete_details) == null ? void 0 : _m.reason,
-        hasToolCalls: content.some((part) => part.type === "tool-call")
+        hasFunctionCall
      }),
      usage: {
        inputTokens: response.usage.input_tokens,
@@ -2699,7 +2783,7 @@ var OpenAIResponsesLanguageModel = class {
    const logprobs = [];
    let responseId = null;
    const ongoingToolCalls = {};
-    let hasToolCalls = false;
+    let hasFunctionCall = false;
    const activeReasoning = {};
    let serviceTier;
    return {
@@ -2789,7 +2873,7 @@ var OpenAIResponsesLanguageModel = class {
          } else if (isResponseOutputItemDoneChunk(value)) {
            if (value.item.type === "function_call") {
              ongoingToolCalls[value.output_index] = void 0;
-              hasToolCalls = true;
+              hasFunctionCall = true;
              controller.enqueue({
                type: "tool-input-end",
                id: value.item.call_id
@@ -2807,7 +2891,6 @@ var OpenAIResponsesLanguageModel = class {
              });
            } else if (value.item.type === "web_search_call") {
              ongoingToolCalls[value.output_index] = void 0;
-              hasToolCalls = true;
              controller.enqueue({
                type: "tool-input-end",
                id: value.item.id
@@ -2815,20 +2898,19 @@ var OpenAIResponsesLanguageModel = class {
              controller.enqueue({
                type: "tool-call",
                toolCallId: value.item.id,
-                toolName: "web_search_preview",
+                toolName: "web_search",
                input: JSON.stringify({ action: value.item.action }),
                providerExecuted: true
              });
              controller.enqueue({
                type: "tool-result",
                toolCallId: value.item.id,
-                toolName: "web_search_preview",
+                toolName: "web_search",
                result: { status: value.item.status },
                providerExecuted: true
              });
            } else if (value.item.type === "computer_call") {
              ongoingToolCalls[value.output_index] = void 0;
-              hasToolCalls = true;
              controller.enqueue({
                type: "tool-input-end",
                id: value.item.id
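Note that with the rename above, streamed parts for web_search_call items are now reported under the tool name "web_search" instead of "web_search_preview". An illustrative part in the shape enqueued by the code above; the id and action values are invented:

// Shape taken from the controller.enqueue call above; values are made up.
const streamedToolCall = {
  type: "tool-call",
  toolCallId: "ws_123",
  toolName: "web_search", // was "web_search_preview" in 2.0.25
  input: JSON.stringify({ action: { type: "search", query: "ai sdk" } }),
  providerExecuted: true
};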
@@ -2852,7 +2934,6 @@ var OpenAIResponsesLanguageModel = class {
              });
            } else if (value.item.type === "file_search_call") {
              ongoingToolCalls[value.output_index] = void 0;
-              hasToolCalls = true;
              controller.enqueue({
                type: "tool-input-end",
                id: value.item.id
@@ -2953,7 +3034,7 @@ var OpenAIResponsesLanguageModel = class {
          } else if (isResponseFinishedChunk(value)) {
            finishReason = mapOpenAIResponseFinishReason({
              finishReason: (_h = value.response.incomplete_details) == null ? void 0 : _h.reason,
-              hasToolCalls
+              hasFunctionCall
            });
            usage.inputTokens = value.response.usage.input_tokens;
            usage.outputTokens = value.response.usage.output_tokens;
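The hasToolCalls → hasFunctionCall rename in this and the surrounding hunks is also a behavior change: only function_call items now drive the "tool-calls" finish reason, while provider-executed web_search, computer_call, and file_search calls no longer do. An illustration of the resulting semantics, using the bundle-internal mapping function shown earlier (not a public API):

// A response whose only tool activity is a provider-executed web_search call:
mapOpenAIResponseFinishReason({ finishReason: undefined, hasFunctionCall: false }); // "stop"
// A response that produced a client-executed function call:
mapOpenAIResponseFinishReason({ finishReason: undefined, hasFunctionCall: true }); // "tool-calls"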
@@ -3012,176 +3093,176 @@ var OpenAIResponsesLanguageModel = class {
    };
  }
 };
-var usageSchema2 = z13.object({
-  input_tokens: z13.number(),
-  input_tokens_details: z13.object({ cached_tokens: z13.number().nullish() }).nullish(),
-  output_tokens: z13.number(),
-  output_tokens_details: z13.object({ reasoning_tokens: z13.number().nullish() }).nullish()
+var usageSchema2 = z14.object({
+  input_tokens: z14.number(),
+  input_tokens_details: z14.object({ cached_tokens: z14.number().nullish() }).nullish(),
+  output_tokens: z14.number(),
+  output_tokens_details: z14.object({ reasoning_tokens: z14.number().nullish() }).nullish()
 });
-var textDeltaChunkSchema = z13.object({
-  type: z13.literal("response.output_text.delta"),
-  item_id: z13.string(),
-  delta: z13.string(),
+var textDeltaChunkSchema = z14.object({
+  type: z14.literal("response.output_text.delta"),
+  item_id: z14.string(),
+  delta: z14.string(),
  logprobs: LOGPROBS_SCHEMA.nullish()
 });
-var errorChunkSchema = z13.object({
-  type: z13.literal("error"),
-  code: z13.string(),
-  message: z13.string(),
-  param: z13.string().nullish(),
-  sequence_number: z13.number()
+var errorChunkSchema = z14.object({
+  type: z14.literal("error"),
+  code: z14.string(),
+  message: z14.string(),
+  param: z14.string().nullish(),
+  sequence_number: z14.number()
 });
-var responseFinishedChunkSchema = z13.object({
-  type: z13.enum(["response.completed", "response.incomplete"]),
-  response: z13.object({
-    incomplete_details: z13.object({ reason: z13.string() }).nullish(),
+var responseFinishedChunkSchema = z14.object({
+  type: z14.enum(["response.completed", "response.incomplete"]),
+  response: z14.object({
+    incomplete_details: z14.object({ reason: z14.string() }).nullish(),
    usage: usageSchema2,
-    service_tier: z13.string().nullish()
+    service_tier: z14.string().nullish()
  })
 });
-var responseCreatedChunkSchema = z13.object({
-  type: z13.literal("response.created"),
-  response: z13.object({
-    id: z13.string(),
-    created_at: z13.number(),
-    model: z13.string(),
-    service_tier: z13.string().nullish()
+var responseCreatedChunkSchema = z14.object({
+  type: z14.literal("response.created"),
+  response: z14.object({
+    id: z14.string(),
+    created_at: z14.number(),
+    model: z14.string(),
+    service_tier: z14.string().nullish()
  })
 });
-var responseOutputItemAddedSchema = z13.object({
-  type: z13.literal("response.output_item.added"),
-  output_index: z13.number(),
-  item: z13.discriminatedUnion("type", [
-    z13.object({
-      type: z13.literal("message"),
-      id: z13.string()
+var responseOutputItemAddedSchema = z14.object({
+  type: z14.literal("response.output_item.added"),
+  output_index: z14.number(),
+  item: z14.discriminatedUnion("type", [
+    z14.object({
+      type: z14.literal("message"),
+      id: z14.string()
    }),
-    z13.object({
-      type: z13.literal("reasoning"),
-      id: z13.string(),
-      encrypted_content: z13.string().nullish()
+    z14.object({
+      type: z14.literal("reasoning"),
+      id: z14.string(),
+      encrypted_content: z14.string().nullish()
    }),
-    z13.object({
-      type: z13.literal("function_call"),
-      id: z13.string(),
-      call_id: z13.string(),
-      name: z13.string(),
-      arguments: z13.string()
+    z14.object({
+      type: z14.literal("function_call"),
+      id: z14.string(),
+      call_id: z14.string(),
+      name: z14.string(),
+      arguments: z14.string()
    }),
-    z13.object({
-      type: z13.literal("web_search_call"),
-      id: z13.string(),
-      status: z13.string(),
-      action: z13.object({
-        type: z13.literal("search"),
-        query: z13.string().optional()
+    z14.object({
+      type: z14.literal("web_search_call"),
+      id: z14.string(),
+      status: z14.string(),
+      action: z14.object({
+        type: z14.literal("search"),
+        query: z14.string().optional()
      }).nullish()
    }),
-    z13.object({
-      type: z13.literal("computer_call"),
-      id: z13.string(),
-      status: z13.string()
+    z14.object({
+      type: z14.literal("computer_call"),
+      id: z14.string(),
+      status: z14.string()
    }),
-    z13.object({
-      type: z13.literal("file_search_call"),
-      id: z13.string(),
-      status: z13.string(),
-      queries: z13.array(z13.string()).nullish(),
-      results: z13.array(
-        z13.object({
-          attributes: z13.object({
-            file_id: z13.string(),
-            filename: z13.string(),
-            score: z13.number(),
-            text: z13.string()
+    z14.object({
+      type: z14.literal("file_search_call"),
+      id: z14.string(),
+      status: z14.string(),
+      queries: z14.array(z14.string()).nullish(),
+      results: z14.array(
+        z14.object({
+          attributes: z14.object({
+            file_id: z14.string(),
+            filename: z14.string(),
+            score: z14.number(),
+            text: z14.string()
          })
        })
      ).optional()
    })
  ])
 });
-var responseOutputItemDoneSchema = z13.object({
-  type: z13.literal("response.output_item.done"),
-  output_index: z13.number(),
-  item: z13.discriminatedUnion("type", [
-    z13.object({
-      type: z13.literal("message"),
-      id: z13.string()
+var responseOutputItemDoneSchema = z14.object({
+  type: z14.literal("response.output_item.done"),
+  output_index: z14.number(),
+  item: z14.discriminatedUnion("type", [
+    z14.object({
+      type: z14.literal("message"),
+      id: z14.string()
    }),
-    z13.object({
-      type: z13.literal("reasoning"),
-      id: z13.string(),
-      encrypted_content: z13.string().nullish()
+    z14.object({
+      type: z14.literal("reasoning"),
+      id: z14.string(),
+      encrypted_content: z14.string().nullish()
    }),
-    z13.object({
-      type: z13.literal("function_call"),
-      id: z13.string(),
-      call_id: z13.string(),
-      name: z13.string(),
-      arguments: z13.string(),
-      status: z13.literal("completed")
+    z14.object({
+      type: z14.literal("function_call"),
+      id: z14.string(),
+      call_id: z14.string(),
+      name: z14.string(),
+      arguments: z14.string(),
+      status: z14.literal("completed")
    }),
    webSearchCallItem,
-    z13.object({
-      type: z13.literal("computer_call"),
-      id: z13.string(),
-      status: z13.literal("completed")
+    z14.object({
+      type: z14.literal("computer_call"),
+      id: z14.string(),
+      status: z14.literal("completed")
    }),
-    z13.object({
-      type: z13.literal("file_search_call"),
-      id: z13.string(),
-      status: z13.literal("completed"),
-      queries: z13.array(z13.string()).nullish(),
-      results: z13.array(
-        z13.object({
-          attributes: z13.object({
-            file_id: z13.string(),
-            filename: z13.string(),
-            score: z13.number(),
-            text: z13.string()
+    z14.object({
+      type: z14.literal("file_search_call"),
+      id: z14.string(),
+      status: z14.literal("completed"),
+      queries: z14.array(z14.string()).nullish(),
+      results: z14.array(
+        z14.object({
+          attributes: z14.object({
+            file_id: z14.string(),
+            filename: z14.string(),
+            score: z14.number(),
+            text: z14.string()
          })
        })
      ).nullish()
    })
  ])
 });
-var responseFunctionCallArgumentsDeltaSchema = z13.object({
-  type: z13.literal("response.function_call_arguments.delta"),
-  item_id: z13.string(),
-  output_index: z13.number(),
-  delta: z13.string()
+var responseFunctionCallArgumentsDeltaSchema = z14.object({
+  type: z14.literal("response.function_call_arguments.delta"),
+  item_id: z14.string(),
+  output_index: z14.number(),
+  delta: z14.string()
 });
-var responseAnnotationAddedSchema = z13.object({
-  type: z13.literal("response.output_text.annotation.added"),
-  annotation: z13.discriminatedUnion("type", [
-    z13.object({
-      type: z13.literal("url_citation"),
-      url: z13.string(),
-      title: z13.string()
+var responseAnnotationAddedSchema = z14.object({
+  type: z14.literal("response.output_text.annotation.added"),
+  annotation: z14.discriminatedUnion("type", [
+    z14.object({
+      type: z14.literal("url_citation"),
+      url: z14.string(),
+      title: z14.string()
    }),
-    z13.object({
-      type: z13.literal("file_citation"),
-      file_id: z13.string(),
-      filename: z13.string().nullish(),
-      index: z13.number().nullish(),
-      start_index: z13.number().nullish(),
-      end_index: z13.number().nullish(),
-      quote: z13.string().nullish()
+    z14.object({
+      type: z14.literal("file_citation"),
+      file_id: z14.string(),
+      filename: z14.string().nullish(),
+      index: z14.number().nullish(),
+      start_index: z14.number().nullish(),
+      end_index: z14.number().nullish(),
+      quote: z14.string().nullish()
    })
  ])
 });
-var responseReasoningSummaryPartAddedSchema = z13.object({
-  type: z13.literal("response.reasoning_summary_part.added"),
-  item_id: z13.string(),
-  summary_index: z13.number()
+var responseReasoningSummaryPartAddedSchema = z14.object({
+  type: z14.literal("response.reasoning_summary_part.added"),
+  item_id: z14.string(),
+  summary_index: z14.number()
 });
-var responseReasoningSummaryTextDeltaSchema = z13.object({
-  type: z13.literal("response.reasoning_summary_text.delta"),
-  item_id: z13.string(),
-  summary_index: z13.number(),
-  delta: z13.string()
+var responseReasoningSummaryTextDeltaSchema = z14.object({
+  type: z14.literal("response.reasoning_summary_text.delta"),
+  item_id: z14.string(),
+  summary_index: z14.number(),
+  delta: z14.string()
 });
-var openaiResponsesChunkSchema = z13.union([
+var openaiResponsesChunkSchema = z14.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
@@ -3192,7 +3273,7 @@ var openaiResponsesChunkSchema = z13.union([
  responseReasoningSummaryPartAddedSchema,
  responseReasoningSummaryTextDeltaSchema,
  errorChunkSchema,
-  z13.object({ type: z13.string() }).loose()
+  z14.object({ type: z14.string() }).loose()
  // fallback for unknown chunks
 ]);
 function isTextDeltaChunk(chunk) {
@@ -3265,27 +3346,27 @@ function getResponsesModelConfig(modelId) {
    isReasoningModel: false
  };
 }
-var openaiResponsesProviderOptionsSchema = z13.object({
-  metadata: z13.any().nullish(),
-  parallelToolCalls: z13.boolean().nullish(),
-  previousResponseId: z13.string().nullish(),
-  store: z13.boolean().nullish(),
-  user: z13.string().nullish(),
-  reasoningEffort: z13.string().nullish(),
-  strictJsonSchema: z13.boolean().nullish(),
-  instructions: z13.string().nullish(),
-  reasoningSummary: z13.string().nullish(),
-  serviceTier: z13.enum(["auto", "flex", "priority"]).nullish(),
-  include: z13.array(
-    z13.enum([
+var openaiResponsesProviderOptionsSchema = z14.object({
+  metadata: z14.any().nullish(),
+  parallelToolCalls: z14.boolean().nullish(),
+  previousResponseId: z14.string().nullish(),
+  store: z14.boolean().nullish(),
+  user: z14.string().nullish(),
+  reasoningEffort: z14.string().nullish(),
+  strictJsonSchema: z14.boolean().nullish(),
+  instructions: z14.string().nullish(),
+  reasoningSummary: z14.string().nullish(),
+  serviceTier: z14.enum(["auto", "flex", "priority"]).nullish(),
+  include: z14.array(
+    z14.enum([
      "reasoning.encrypted_content",
      "file_search_call.results",
      "message.output_text.logprobs"
    ])
  ).nullish(),
-  textVerbosity: z13.enum(["low", "medium", "high"]).nullish(),
-  promptCacheKey: z13.string().nullish(),
-  safetyIdentifier: z13.string().nullish(),
+  textVerbosity: z14.enum(["low", "medium", "high"]).nullish(),
+  promptCacheKey: z14.string().nullish(),
+  safetyIdentifier: z14.string().nullish(),
  /**
   * Return the log probabilities of the tokens.
   *
@@ -3298,7 +3379,7 @@ var openaiResponsesProviderOptionsSchema = z13.object({
   * @see https://platform.openai.com/docs/api-reference/responses/create
   * @see https://cookbook.openai.com/examples/using_logprobs
   */
-  logprobs: z13.union([z13.boolean(), z13.number().min(1).max(TOP_LOGPROBS_MAX)]).optional()
+  logprobs: z14.union([z14.boolean(), z14.number().min(1).max(TOP_LOGPROBS_MAX)]).optional()
 });

 // src/speech/openai-speech-model.ts
@@ -3308,10 +3389,10 @@ import {
  parseProviderOptions as parseProviderOptions6,
  postJsonToApi as postJsonToApi6
 } from "@ai-sdk/provider-utils";
-import { z as z14 } from "zod/v4";
-var OpenAIProviderOptionsSchema = z14.object({
-  instructions: z14.string().nullish(),
-  speed: z14.number().min(0.25).max(4).default(1).nullish()
+import { z as z15 } from "zod/v4";
+var OpenAIProviderOptionsSchema = z15.object({
+  instructions: z15.string().nullish(),
+  speed: z15.number().min(0.25).max(4).default(1).nullish()
 });
 var OpenAISpeechModel = class {
  constructor(modelId, config) {
@@ -3422,33 +3503,33 @@ import {
  parseProviderOptions as parseProviderOptions7,
  postFormDataToApi
 } from "@ai-sdk/provider-utils";
-import { z as z16 } from "zod/v4";
+import { z as z17 } from "zod/v4";

 // src/transcription/openai-transcription-options.ts
-import { z as z15 } from "zod/v4";
-var openAITranscriptionProviderOptions = z15.object({
+import { z as z16 } from "zod/v4";
+var openAITranscriptionProviderOptions = z16.object({
  /**
   * Additional information to include in the transcription response.
   */
-  include: z15.array(z15.string()).optional(),
+  include: z16.array(z16.string()).optional(),
  /**
   * The language of the input audio in ISO-639-1 format.
   */
-  language: z15.string().optional(),
+  language: z16.string().optional(),
  /**
   * An optional text to guide the model's style or continue a previous audio segment.
   */
-  prompt: z15.string().optional(),
+  prompt: z16.string().optional(),
  /**
   * The sampling temperature, between 0 and 1.
   * @default 0
   */
-  temperature: z15.number().min(0).max(1).default(0).optional(),
+  temperature: z16.number().min(0).max(1).default(0).optional(),
  /**
   * The timestamp granularities to populate for this transcription.
   * @default ['segment']
   */
-  timestampGranularities: z15.array(z15.enum(["word", "segment"])).default(["segment"]).optional()
+  timestampGranularities: z16.array(z16.enum(["word", "segment"])).default(["segment"]).optional()
 });

 // src/transcription/openai-transcription-model.ts
@@ -3617,29 +3698,29 @@ var OpenAITranscriptionModel = class {
    };
  }
 };
-var openaiTranscriptionResponseSchema = z16.object({
-  text: z16.string(),
-  language: z16.string().nullish(),
-  duration: z16.number().nullish(),
-  words: z16.array(
-    z16.object({
-      word: z16.string(),
-      start: z16.number(),
-      end: z16.number()
+var openaiTranscriptionResponseSchema = z17.object({
+  text: z17.string(),
+  language: z17.string().nullish(),
+  duration: z17.number().nullish(),
+  words: z17.array(
+    z17.object({
+      word: z17.string(),
+      start: z17.number(),
+      end: z17.number()
    })
  ).nullish(),
-  segments: z16.array(
-    z16.object({
-      id: z16.number(),
-      seek: z16.number(),
-      start: z16.number(),
-      end: z16.number(),
-      text: z16.string(),
-      tokens: z16.array(z16.number()),
-      temperature: z16.number(),
-      avg_logprob: z16.number(),
-      compression_ratio: z16.number(),
-      no_speech_prob: z16.number()
+  segments: z17.array(
+    z17.object({
+      id: z17.number(),
+      seek: z17.number(),
+      start: z17.number(),
+      end: z17.number(),
+      text: z17.string(),
+      tokens: z17.array(z17.number()),
+      temperature: z17.number(),
+      avg_logprob: z17.number(),
+      compression_ratio: z17.number(),
+      no_speech_prob: z17.number()
    })
  ).nullish()
 });