@ai-sdk/openai 2.1.0-beta.1 → 2.1.0-beta.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -1,7 +1,9 @@
  // src/openai-provider.ts
  import {
  loadApiKey,
- withoutTrailingSlash
+ loadOptionalSetting,
+ withoutTrailingSlash,
+ withUserAgentSuffix
  } from "@ai-sdk/provider-utils";
 
  // src/chat/openai-chat-language-model.ts
@@ -250,7 +252,7 @@ function mapOpenAIFinishReason(finishReason) {
 
  // src/chat/openai-chat-options.ts
  import { z as z2 } from "zod/v4";
- var openaiProviderOptions = z2.object({
+ var openaiChatLanguageModelOptions = z2.object({
  /**
  * Modify the likelihood of specified tokens appearing in the completion.
  *
@@ -404,7 +406,7 @@ function prepareChatTools({
  // src/chat/openai-chat-language-model.ts
  var OpenAIChatLanguageModel = class {
  constructor(modelId, config) {
- this.specificationVersion = "v2";
+ this.specificationVersion = "v3";
  this.supportedUrls = {
  "image/*": [/^https?:\/\/.*$/]
  };
@@ -434,7 +436,7 @@ var OpenAIChatLanguageModel = class {
  const openaiOptions = (_a = await parseProviderOptions({
  provider: "openai",
  providerOptions,
- schema: openaiProviderOptions
+ schema: openaiChatLanguageModelOptions
  })) != null ? _a : {};
  const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
  if (topK != null) {
@@ -1222,7 +1224,7 @@ var openaiCompletionProviderOptions = z4.object({
  // src/completion/openai-completion-language-model.ts
  var OpenAICompletionLanguageModel = class {
  constructor(modelId, config) {
- this.specificationVersion = "v2";
+ this.specificationVersion = "v3";
  this.supportedUrls = {
  // No URLs are supported for completion models.
  };
@@ -1522,7 +1524,7 @@ var openaiEmbeddingProviderOptions = z6.object({
  // src/embedding/openai-embedding-model.ts
  var OpenAIEmbeddingModel = class {
  constructor(modelId, config) {
- this.specificationVersion = "v2";
+ this.specificationVersion = "v3";
  this.maxEmbeddingsPerCall = 2048;
  this.supportsParallelCalls = true;
  this.modelId = modelId;
@@ -1608,7 +1610,7 @@ var OpenAIImageModel = class {
  constructor(modelId, config) {
  this.modelId = modelId;
  this.config = config;
- this.specificationVersion = "v2";
+ this.specificationVersion = "v3";
  }
  get maxImagesPerCall() {
  var _a;
@@ -1792,39 +1794,62 @@ var imageGeneration = (args = {}) => {
  return imageGenerationToolFactory(args);
  };
 
+ // src/tool/local-shell.ts
+ import { createProviderDefinedToolFactoryWithOutputSchema as createProviderDefinedToolFactoryWithOutputSchema4 } from "@ai-sdk/provider-utils";
+ import { z as z12 } from "zod/v4";
+ var localShellInputSchema = z12.object({
+ action: z12.object({
+ type: z12.literal("exec"),
+ command: z12.array(z12.string()),
+ timeoutMs: z12.number().optional(),
+ user: z12.string().optional(),
+ workingDirectory: z12.string().optional(),
+ env: z12.record(z12.string(), z12.string()).optional()
+ })
+ });
+ var localShellOutputSchema = z12.object({
+ output: z12.string()
+ });
+ var localShell = createProviderDefinedToolFactoryWithOutputSchema4({
+ id: "openai.local_shell",
+ name: "local_shell",
+ inputSchema: localShellInputSchema,
+ outputSchema: localShellOutputSchema
+ });
+
  // src/tool/web-search.ts
  import { createProviderDefinedToolFactory } from "@ai-sdk/provider-utils";
- import { z as z12 } from "zod/v4";
- var webSearchArgsSchema = z12.object({
- filters: z12.object({
- allowedDomains: z12.array(z12.string()).optional()
+ import { z as z13 } from "zod/v4";
+ var webSearchArgsSchema = z13.object({
+ filters: z13.object({
+ allowedDomains: z13.array(z13.string()).optional()
  }).optional(),
- searchContextSize: z12.enum(["low", "medium", "high"]).optional(),
- userLocation: z12.object({
- type: z12.literal("approximate"),
- country: z12.string().optional(),
- city: z12.string().optional(),
- region: z12.string().optional(),
- timezone: z12.string().optional()
+ searchContextSize: z13.enum(["low", "medium", "high"]).optional(),
+ userLocation: z13.object({
+ type: z13.literal("approximate"),
+ country: z13.string().optional(),
+ city: z13.string().optional(),
+ region: z13.string().optional(),
+ timezone: z13.string().optional()
  }).optional()
  });
  var webSearchToolFactory = createProviderDefinedToolFactory({
  id: "openai.web_search",
  name: "web_search",
- inputSchema: z12.object({
- action: z12.discriminatedUnion("type", [
- z12.object({
- type: z12.literal("search"),
- query: z12.string().nullish()
+ inputSchema: z13.object({
+ action: z13.discriminatedUnion("type", [
+ z13.object({
+ type: z13.literal("search"),
+ query: z13.string().nullish()
  }),
- z12.object({
- type: z12.literal("open_page"),
- url: z12.string()
+ z13.object({
+ type: z13.literal("open_page"),
+ url: z13.string()
  }),
- z12.object({
- type: z12.literal("find"),
- url: z12.string(),
- pattern: z12.string()
+ z13.object({
+ type: z13.literal("find"),
+ url: z13.string(),
+ pattern: z13.string()
  })
  ]).nullish()
  })
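
For orientation, a rough usage sketch of the new `localShell` tool from application code. Assumptions not contained in this diff: the Responses model id `gpt-5-codex` and the shape of the `execute` handler; only the `openai.local_shell` tool definition above is part of the package.

```ts
import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

const result = await generateText({
  model: openai.responses("gpt-5-codex"),
  tools: {
    // key must be `local_shell` (see the openaiTools JSDoc further below)
    local_shell: openai.tools.localShell({
      // assumed client-side executor: receives the parsed { action } input and
      // must return { output: string } to match localShellOutputSchema
      execute: async ({ action }) => ({
        output: `(pretend we ran: ${action.command.join(" ")})`,
      }),
    }),
  },
  prompt: "List the files in the current working directory.",
});
```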
@@ -1835,58 +1860,58 @@ var webSearch = (args = {}) => {
 
  // src/tool/web-search-preview.ts
  import { createProviderDefinedToolFactory as createProviderDefinedToolFactory2 } from "@ai-sdk/provider-utils";
- import { z as z13 } from "zod/v4";
- var webSearchPreviewArgsSchema = z13.object({
+ import { z as z14 } from "zod/v4";
+ var webSearchPreviewArgsSchema = z14.object({
  /**
  * Search context size to use for the web search.
  * - high: Most comprehensive context, highest cost, slower response
  * - medium: Balanced context, cost, and latency (default)
  * - low: Least context, lowest cost, fastest response
  */
- searchContextSize: z13.enum(["low", "medium", "high"]).optional(),
+ searchContextSize: z14.enum(["low", "medium", "high"]).optional(),
  /**
  * User location information to provide geographically relevant search results.
  */
- userLocation: z13.object({
+ userLocation: z14.object({
  /**
  * Type of location (always 'approximate')
  */
- type: z13.literal("approximate"),
+ type: z14.literal("approximate"),
  /**
  * Two-letter ISO country code (e.g., 'US', 'GB')
  */
- country: z13.string().optional(),
+ country: z14.string().optional(),
  /**
  * City name (free text, e.g., 'Minneapolis')
  */
- city: z13.string().optional(),
+ city: z14.string().optional(),
  /**
  * Region name (free text, e.g., 'Minnesota')
  */
- region: z13.string().optional(),
+ region: z14.string().optional(),
  /**
  * IANA timezone (e.g., 'America/Chicago')
  */
- timezone: z13.string().optional()
+ timezone: z14.string().optional()
  }).optional()
  });
  var webSearchPreview = createProviderDefinedToolFactory2({
  id: "openai.web_search_preview",
  name: "web_search_preview",
- inputSchema: z13.object({
- action: z13.discriminatedUnion("type", [
- z13.object({
- type: z13.literal("search"),
- query: z13.string().nullish()
+ inputSchema: z14.object({
+ action: z14.discriminatedUnion("type", [
+ z14.object({
+ type: z14.literal("search"),
+ query: z14.string().nullish()
  }),
- z13.object({
- type: z13.literal("open_page"),
- url: z13.string()
+ z14.object({
+ type: z14.literal("open_page"),
+ url: z14.string()
  }),
- z13.object({
- type: z13.literal("find"),
- url: z13.string(),
- pattern: z13.string()
+ z14.object({
+ type: z14.literal("find"),
+ url: z14.string(),
+ pattern: z14.string()
  })
  ]).nullish()
  })
@@ -1931,6 +1956,15 @@ var openaiTools = {
  * @param background - Transparent or opaque
  */
  imageGeneration,
+ /**
+ * Local shell is a tool that allows agents to run shell commands locally
+ * on a machine you or the user provides.
+ *
+ * Supported models: `gpt-5-codex` and `codex-mini-latest`
+ *
+ * Must have name `local_shell`.
+ */
+ localShell,
  /**
  * Web search allows models to access up-to-date information from the internet
  * and provide answers with sourced citations.
@@ -1968,14 +2002,14 @@ import {
  parseProviderOptions as parseProviderOptions5,
  postJsonToApi as postJsonToApi5
  } from "@ai-sdk/provider-utils";
- import { z as z15 } from "zod/v4";
+ import { z as z16 } from "zod/v4";
 
  // src/responses/convert-to-openai-responses-input.ts
  import {
  UnsupportedFunctionalityError as UnsupportedFunctionalityError4
  } from "@ai-sdk/provider";
  import { convertToBase64 as convertToBase642, parseProviderOptions as parseProviderOptions4 } from "@ai-sdk/provider-utils";
- import { z as z14 } from "zod/v4";
+ import { z as z15 } from "zod/v4";
  function isFileId(data, prefixes) {
  if (!prefixes) return false;
  return prefixes.some((prefix) => data.startsWith(prefix));
@@ -1984,9 +2018,10 @@ async function convertToOpenAIResponsesInput({
  prompt,
  systemMessageMode,
  fileIdPrefixes,
- store
+ store,
+ hasLocalShellTool = false
  }) {
- var _a, _b, _c, _d, _e, _f;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
  const input = [];
  const warnings = [];
  for (const { role, content } of prompt) {
@@ -2079,12 +2114,29 @@ async function convertToOpenAIResponsesInput({
  if (part.providerExecuted) {
  break;
  }
+ if (hasLocalShellTool && part.toolName === "local_shell") {
+ const parsedInput = localShellInputSchema.parse(part.input);
+ input.push({
+ type: "local_shell_call",
+ call_id: part.toolCallId,
+ id: (_f = (_e = (_d = part.providerOptions) == null ? void 0 : _d.openai) == null ? void 0 : _e.itemId) != null ? _f : void 0,
+ action: {
+ type: "exec",
+ command: parsedInput.action.command,
+ timeout_ms: parsedInput.action.timeoutMs,
+ user: parsedInput.action.user,
+ working_directory: parsedInput.action.workingDirectory,
+ env: parsedInput.action.env
+ }
+ });
+ break;
+ }
  input.push({
  type: "function_call",
  call_id: part.toolCallId,
  name: part.toolName,
  arguments: JSON.stringify(part.input),
- id: (_f = (_e = (_d = part.providerOptions) == null ? void 0 : _d.openai) == null ? void 0 : _e.itemId) != null ? _f : void 0
+ id: (_i = (_h = (_g = part.providerOptions) == null ? void 0 : _g.openai) == null ? void 0 : _h.itemId) != null ? _i : void 0
  });
  break;
  }
@@ -2108,26 +2160,40 @@ async function convertToOpenAIResponsesInput({
  });
  const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
  if (reasoningId != null) {
- const existingReasoningMessage = reasoningMessages[reasoningId];
- const summaryParts = [];
- if (part.text.length > 0) {
- summaryParts.push({ type: "summary_text", text: part.text });
- } else if (existingReasoningMessage !== void 0) {
- warnings.push({
- type: "other",
- message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
- });
- }
- if (existingReasoningMessage === void 0) {
- reasoningMessages[reasoningId] = {
- type: "reasoning",
- id: reasoningId,
- encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
- summary: summaryParts
- };
- input.push(reasoningMessages[reasoningId]);
+ const reasoningMessage = reasoningMessages[reasoningId];
+ if (store) {
+ if (reasoningMessage === void 0) {
+ input.push({ type: "item_reference", id: reasoningId });
+ reasoningMessages[reasoningId] = {
+ type: "reasoning",
+ id: reasoningId,
+ summary: []
+ };
+ }
  } else {
- existingReasoningMessage.summary.push(...summaryParts);
+ const summaryParts = [];
+ if (part.text.length > 0) {
+ summaryParts.push({
+ type: "summary_text",
+ text: part.text
+ });
+ } else if (reasoningMessage !== void 0) {
+ warnings.push({
+ type: "other",
+ message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
+ });
+ }
+ if (reasoningMessage === void 0) {
+ reasoningMessages[reasoningId] = {
+ type: "reasoning",
+ id: reasoningId,
+ encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
+ summary: summaryParts
+ };
+ input.push(reasoningMessages[reasoningId]);
+ } else {
+ reasoningMessage.summary.push(...summaryParts);
+ }
  }
  } else {
  warnings.push({
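
In effect, when `store` is enabled (the default), a reasoning part that carries an `itemId` from a previously stored response is now sent back to the Responses API as an item reference instead of a replayed summary. A minimal sketch of the resulting `input` entry (the id value is made up):

```ts
const input: Array<Record<string, unknown>> = [];

// store: true — reference the stored reasoning item by id; later parts with the
// same itemId are tracked in reasoningMessages and skipped
input.push({ type: "item_reference", id: "rs_0123abc" });

// store: false — previous behavior is kept: a full `reasoning` item with
// encrypted_content and summary_text parts is pushed, and repeated itemIds
// append to its summary array
```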
@@ -2144,6 +2210,14 @@ async function convertToOpenAIResponsesInput({
  case "tool": {
  for (const part of content) {
  const output = part.output;
+ if (hasLocalShellTool && part.toolName === "local_shell" && output.type === "json") {
+ input.push({
+ type: "local_shell_call_output",
+ call_id: part.toolCallId,
+ output: localShellOutputSchema.parse(output.value).output
+ });
+ break;
+ }
  let contentValue;
  switch (output.type) {
  case "text":
@@ -2172,9 +2246,9 @@ async function convertToOpenAIResponsesInput({
  }
  return { input, warnings };
  }
- var openaiResponsesReasoningProviderOptionsSchema = z14.object({
- itemId: z14.string().nullish(),
- reasoningEncryptedContent: z14.string().nullish()
+ var openaiResponsesReasoningProviderOptionsSchema = z15.object({
+ itemId: z15.string().nullish(),
+ reasoningEncryptedContent: z15.string().nullish()
  });
 
  // src/responses/map-openai-responses-finish-reason.ts
@@ -2237,6 +2311,12 @@ function prepareResponsesTools({
  });
  break;
  }
+ case "openai.local_shell": {
+ openaiTools2.push({
+ type: "local_shell"
+ });
+ break;
+ }
  case "openai.web_search_preview": {
  const args = webSearchPreviewArgsSchema.parse(tool.args);
  openaiTools2.push({
@@ -2316,73 +2396,86 @@ function prepareResponsesTools({
  }
 
  // src/responses/openai-responses-language-model.ts
- var webSearchCallItem = z15.object({
- type: z15.literal("web_search_call"),
- id: z15.string(),
- status: z15.string(),
- action: z15.discriminatedUnion("type", [
- z15.object({
- type: z15.literal("search"),
- query: z15.string().nullish()
+ var webSearchCallItem = z16.object({
+ type: z16.literal("web_search_call"),
+ id: z16.string(),
+ status: z16.string(),
+ action: z16.discriminatedUnion("type", [
+ z16.object({
+ type: z16.literal("search"),
+ query: z16.string().nullish()
  }),
- z15.object({
- type: z15.literal("open_page"),
- url: z15.string()
+ z16.object({
+ type: z16.literal("open_page"),
+ url: z16.string()
  }),
- z15.object({
- type: z15.literal("find"),
- url: z15.string(),
- pattern: z15.string()
+ z16.object({
+ type: z16.literal("find"),
+ url: z16.string(),
+ pattern: z16.string()
  })
  ]).nullish()
  });
- var fileSearchCallItem = z15.object({
- type: z15.literal("file_search_call"),
- id: z15.string(),
- queries: z15.array(z15.string()),
- results: z15.array(
- z15.object({
- attributes: z15.record(z15.string(), z15.unknown()),
- file_id: z15.string(),
- filename: z15.string(),
- score: z15.number(),
- text: z15.string()
+ var fileSearchCallItem = z16.object({
+ type: z16.literal("file_search_call"),
+ id: z16.string(),
+ queries: z16.array(z16.string()),
+ results: z16.array(
+ z16.object({
+ attributes: z16.record(z16.string(), z16.unknown()),
+ file_id: z16.string(),
+ filename: z16.string(),
+ score: z16.number(),
+ text: z16.string()
  })
  ).nullish()
  });
- var codeInterpreterCallItem = z15.object({
- type: z15.literal("code_interpreter_call"),
- id: z15.string(),
- code: z15.string().nullable(),
- container_id: z15.string(),
- outputs: z15.array(
- z15.discriminatedUnion("type", [
- z15.object({ type: z15.literal("logs"), logs: z15.string() }),
- z15.object({ type: z15.literal("image"), url: z15.string() })
+ var codeInterpreterCallItem = z16.object({
+ type: z16.literal("code_interpreter_call"),
+ id: z16.string(),
+ code: z16.string().nullable(),
+ container_id: z16.string(),
+ outputs: z16.array(
+ z16.discriminatedUnion("type", [
+ z16.object({ type: z16.literal("logs"), logs: z16.string() }),
+ z16.object({ type: z16.literal("image"), url: z16.string() })
  ])
  ).nullable()
  });
- var imageGenerationCallItem = z15.object({
- type: z15.literal("image_generation_call"),
- id: z15.string(),
- result: z15.string()
+ var localShellCallItem = z16.object({
+ type: z16.literal("local_shell_call"),
+ id: z16.string(),
+ call_id: z16.string(),
+ action: z16.object({
+ type: z16.literal("exec"),
+ command: z16.array(z16.string()),
+ timeout_ms: z16.number().optional(),
+ user: z16.string().optional(),
+ working_directory: z16.string().optional(),
+ env: z16.record(z16.string(), z16.string()).optional()
+ })
+ });
+ var imageGenerationCallItem = z16.object({
+ type: z16.literal("image_generation_call"),
+ id: z16.string(),
+ result: z16.string()
  });
  var TOP_LOGPROBS_MAX = 20;
- var LOGPROBS_SCHEMA = z15.array(
- z15.object({
- token: z15.string(),
- logprob: z15.number(),
- top_logprobs: z15.array(
- z15.object({
- token: z15.string(),
- logprob: z15.number()
+ var LOGPROBS_SCHEMA = z16.array(
+ z16.object({
+ token: z16.string(),
+ logprob: z16.number(),
+ top_logprobs: z16.array(
+ z16.object({
+ token: z16.string(),
+ logprob: z16.number()
  })
  )
  })
  );
  var OpenAIResponsesLanguageModel = class {
  constructor(modelId, config) {
- this.specificationVersion = "v2";
+ this.specificationVersion = "v3";
  this.supportedUrls = {
  "image/*": [/^https?:\/\/.*$/],
  "application/pdf": [/^https?:\/\/.*$/]
@@ -2441,7 +2534,8 @@ var OpenAIResponsesLanguageModel = class {
  prompt,
  systemMessageMode: modelConfig.systemMessageMode,
  fileIdPrefixes: this.config.fileIdPrefixes,
- store: (_a = openaiOptions == null ? void 0 : openaiOptions.store) != null ? _a : true
+ store: (_a = openaiOptions == null ? void 0 : openaiOptions.store) != null ? _a : true,
+ hasLocalShellTool: hasOpenAITool("openai.local_shell")
  });
  warnings.push(...inputWarnings);
  const strictJsonSchema = (_b = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _b : false;
@@ -2606,45 +2700,45 @@ var OpenAIResponsesLanguageModel = class {
  body,
  failedResponseHandler: openaiFailedResponseHandler,
  successfulResponseHandler: createJsonResponseHandler5(
- z15.object({
- id: z15.string(),
- created_at: z15.number(),
- error: z15.object({
- code: z15.string(),
- message: z15.string()
+ z16.object({
+ id: z16.string(),
+ created_at: z16.number(),
+ error: z16.object({
+ code: z16.string(),
+ message: z16.string()
  }).nullish(),
- model: z15.string(),
- output: z15.array(
- z15.discriminatedUnion("type", [
- z15.object({
- type: z15.literal("message"),
- role: z15.literal("assistant"),
- id: z15.string(),
- content: z15.array(
- z15.object({
- type: z15.literal("output_text"),
- text: z15.string(),
+ model: z16.string(),
+ output: z16.array(
+ z16.discriminatedUnion("type", [
+ z16.object({
+ type: z16.literal("message"),
+ role: z16.literal("assistant"),
+ id: z16.string(),
+ content: z16.array(
+ z16.object({
+ type: z16.literal("output_text"),
+ text: z16.string(),
  logprobs: LOGPROBS_SCHEMA.nullish(),
- annotations: z15.array(
- z15.discriminatedUnion("type", [
- z15.object({
- type: z15.literal("url_citation"),
- start_index: z15.number(),
- end_index: z15.number(),
- url: z15.string(),
- title: z15.string()
+ annotations: z16.array(
+ z16.discriminatedUnion("type", [
+ z16.object({
+ type: z16.literal("url_citation"),
+ start_index: z16.number(),
+ end_index: z16.number(),
+ url: z16.string(),
+ title: z16.string()
  }),
- z15.object({
- type: z15.literal("file_citation"),
- file_id: z15.string(),
- filename: z15.string().nullish(),
- index: z15.number().nullish(),
- start_index: z15.number().nullish(),
- end_index: z15.number().nullish(),
- quote: z15.string().nullish()
+ z16.object({
+ type: z16.literal("file_citation"),
+ file_id: z16.string(),
+ filename: z16.string().nullish(),
+ index: z16.number().nullish(),
+ start_index: z16.number().nullish(),
+ end_index: z16.number().nullish(),
+ quote: z16.string().nullish()
  }),
- z15.object({
- type: z15.literal("container_file_citation")
+ z16.object({
+ type: z16.literal("container_file_citation")
  })
  ])
  )
@@ -2655,33 +2749,34 @@ var OpenAIResponsesLanguageModel = class {
  fileSearchCallItem,
  codeInterpreterCallItem,
  imageGenerationCallItem,
- z15.object({
- type: z15.literal("function_call"),
- call_id: z15.string(),
- name: z15.string(),
- arguments: z15.string(),
- id: z15.string()
+ localShellCallItem,
+ z16.object({
+ type: z16.literal("function_call"),
+ call_id: z16.string(),
+ name: z16.string(),
+ arguments: z16.string(),
+ id: z16.string()
  }),
- z15.object({
- type: z15.literal("computer_call"),
- id: z15.string(),
- status: z15.string().optional()
+ z16.object({
+ type: z16.literal("computer_call"),
+ id: z16.string(),
+ status: z16.string().optional()
  }),
- z15.object({
- type: z15.literal("reasoning"),
- id: z15.string(),
- encrypted_content: z15.string().nullish(),
- summary: z15.array(
- z15.object({
- type: z15.literal("summary_text"),
- text: z15.string()
+ z16.object({
+ type: z16.literal("reasoning"),
+ id: z16.string(),
+ encrypted_content: z16.string().nullish(),
+ summary: z16.array(
+ z16.object({
+ type: z16.literal("summary_text"),
+ text: z16.string()
  })
  )
  })
  ])
  ),
- service_tier: z15.string().nullish(),
- incomplete_details: z15.object({ reason: z15.string() }).nullable(),
+ service_tier: z16.string().nullish(),
+ incomplete_details: z16.object({ reason: z16.string() }).nullish(),
  usage: usageSchema2
  })
  ),
@@ -2741,6 +2836,20 @@ var OpenAIResponsesLanguageModel = class {
  });
  break;
  }
+ case "local_shell_call": {
+ content.push({
+ type: "tool-call",
+ toolCallId: part.call_id,
+ toolName: "local_shell",
+ input: JSON.stringify({ action: part.action }),
+ providerMetadata: {
+ openai: {
+ itemId: part.id
+ }
+ }
+ });
+ break;
+ }
  case "message": {
  for (const contentPart of part.content) {
  if (((_c = (_b = options.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.logprobs) && contentPart.logprobs) {
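
So a `local_shell_call` output item in a non-streaming response is surfaced as a regular `tool-call` content part whose stringified input carries the `action`, with the Responses item id preserved in provider metadata. Roughly (ids invented for illustration):

```ts
const localShellToolCallPart = {
  type: "tool-call",
  toolCallId: "call_123",
  toolName: "local_shell",
  // action is passed through as returned by the API (snake_case fields)
  input: '{"action":{"type":"exec","command":["ls","-la"]}}',
  providerMetadata: { openai: { itemId: "lsh_456" } },
};
```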
@@ -2998,6 +3107,24 @@ var OpenAIResponsesLanguageModel = class {
  id: value.item.id,
  toolName: "computer_use"
  });
+ } else if (value.item.type === "code_interpreter_call") {
+ ongoingToolCalls[value.output_index] = {
+ toolName: "code_interpreter",
+ toolCallId: value.item.id,
+ codeInterpreter: {
+ containerId: value.item.container_id
+ }
+ };
+ controller.enqueue({
+ type: "tool-input-start",
+ id: value.item.id,
+ toolName: "code_interpreter"
+ });
+ controller.enqueue({
+ type: "tool-input-delta",
+ id: value.item.id,
+ delta: `{"containerId":"${value.item.container_id}","code":"`
+ });
  } else if (value.item.type === "file_search_call") {
  controller.enqueue({
  type: "tool-call",
@@ -3121,16 +3248,7 @@ var OpenAIResponsesLanguageModel = class {
  providerExecuted: true
  });
  } else if (value.item.type === "code_interpreter_call") {
- controller.enqueue({
- type: "tool-call",
- toolCallId: value.item.id,
- toolName: "code_interpreter",
- input: JSON.stringify({
- code: value.item.code,
- containerId: value.item.container_id
- }),
- providerExecuted: true
- });
+ ongoingToolCalls[value.output_index] = void 0;
  controller.enqueue({
  type: "tool-result",
  toolCallId: value.item.id,
@@ -3150,6 +3268,26 @@ var OpenAIResponsesLanguageModel = class {
  },
  providerExecuted: true
  });
+ } else if (value.item.type === "local_shell_call") {
+ ongoingToolCalls[value.output_index] = void 0;
+ controller.enqueue({
+ type: "tool-call",
+ toolCallId: value.item.call_id,
+ toolName: "local_shell",
+ input: JSON.stringify({
+ action: {
+ type: "exec",
+ command: value.item.action.command,
+ timeoutMs: value.item.action.timeout_ms,
+ user: value.item.action.user,
+ workingDirectory: value.item.action.working_directory,
+ env: value.item.action.env
+ }
+ }),
+ providerMetadata: {
+ openai: { itemId: value.item.id }
+ }
+ });
  } else if (value.item.type === "message") {
  controller.enqueue({
  type: "text-end",
@@ -3180,6 +3318,40 @@ var OpenAIResponsesLanguageModel = class {
  delta: value.delta
  });
  }
+ } else if (isResponseCodeInterpreterCallCodeDeltaChunk(value)) {
+ const toolCall = ongoingToolCalls[value.output_index];
+ if (toolCall != null) {
+ controller.enqueue({
+ type: "tool-input-delta",
+ id: toolCall.toolCallId,
+ // The delta is code, which is embedding in a JSON string.
+ // To escape it, we use JSON.stringify and slice to remove the outer quotes.
+ delta: JSON.stringify(value.delta).slice(1, -1)
+ });
+ }
+ } else if (isResponseCodeInterpreterCallCodeDoneChunk(value)) {
+ const toolCall = ongoingToolCalls[value.output_index];
+ if (toolCall != null) {
+ controller.enqueue({
+ type: "tool-input-delta",
+ id: toolCall.toolCallId,
+ delta: '"}'
+ });
+ controller.enqueue({
+ type: "tool-input-end",
+ id: toolCall.toolCallId
+ });
+ controller.enqueue({
+ type: "tool-call",
+ toolCallId: toolCall.toolCallId,
+ toolName: "code_interpreter",
+ input: JSON.stringify({
+ code: value.code,
+ containerId: toolCall.codeInterpreter.containerId
+ }),
+ providerExecuted: true
+ });
+ }
  } else if (isResponseCreatedChunk(value)) {
  responseId = value.response.id;
  controller.enqueue({
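
The net effect of the two new chunk types is that `code_interpreter_call` input is now streamed: a `tool-input-start` is emitted when the output item is added, each `response.code_interpreter_call_code.delta` becomes a JSON-escaped `tool-input-delta`, and the `.done` chunk closes the JSON object, ends the input, and emits the final provider-executed `tool-call`. Concatenating the deltas for one call therefore yields valid JSON. A small sketch (container id and code invented):

```ts
const deltas = [
  '{"containerId":"cntr_abc","code":"', // from tool-input-start handling
  'print(1)\\n',                        // code delta, escaped via JSON.stringify(...).slice(1, -1)
  '"}',                                 // closing delta from the .done chunk
];
const input = JSON.parse(deltas.join(""));
// => { containerId: "cntr_abc", code: "print(1)\n" }
```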
@@ -3286,166 +3458,194 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  };
- var usageSchema2 = z15.object({
- input_tokens: z15.number(),
- input_tokens_details: z15.object({ cached_tokens: z15.number().nullish() }).nullish(),
- output_tokens: z15.number(),
- output_tokens_details: z15.object({ reasoning_tokens: z15.number().nullish() }).nullish()
+ var usageSchema2 = z16.object({
+ input_tokens: z16.number(),
+ input_tokens_details: z16.object({ cached_tokens: z16.number().nullish() }).nullish(),
+ output_tokens: z16.number(),
+ output_tokens_details: z16.object({ reasoning_tokens: z16.number().nullish() }).nullish()
  });
- var textDeltaChunkSchema = z15.object({
- type: z15.literal("response.output_text.delta"),
- item_id: z15.string(),
- delta: z15.string(),
+ var textDeltaChunkSchema = z16.object({
+ type: z16.literal("response.output_text.delta"),
+ item_id: z16.string(),
+ delta: z16.string(),
  logprobs: LOGPROBS_SCHEMA.nullish()
  });
- var errorChunkSchema = z15.object({
- type: z15.literal("error"),
- code: z15.string(),
- message: z15.string(),
- param: z15.string().nullish(),
- sequence_number: z15.number()
+ var errorChunkSchema = z16.object({
+ type: z16.literal("error"),
+ code: z16.string(),
+ message: z16.string(),
+ param: z16.string().nullish(),
+ sequence_number: z16.number()
  });
- var responseFinishedChunkSchema = z15.object({
- type: z15.enum(["response.completed", "response.incomplete"]),
- response: z15.object({
- incomplete_details: z15.object({ reason: z15.string() }).nullish(),
+ var responseFinishedChunkSchema = z16.object({
+ type: z16.enum(["response.completed", "response.incomplete"]),
+ response: z16.object({
+ incomplete_details: z16.object({ reason: z16.string() }).nullish(),
  usage: usageSchema2,
- service_tier: z15.string().nullish()
+ service_tier: z16.string().nullish()
  })
  });
- var responseCreatedChunkSchema = z15.object({
- type: z15.literal("response.created"),
- response: z15.object({
- id: z15.string(),
- created_at: z15.number(),
- model: z15.string(),
- service_tier: z15.string().nullish()
+ var responseCreatedChunkSchema = z16.object({
+ type: z16.literal("response.created"),
+ response: z16.object({
+ id: z16.string(),
+ created_at: z16.number(),
+ model: z16.string(),
+ service_tier: z16.string().nullish()
  })
  });
- var responseOutputItemAddedSchema = z15.object({
- type: z15.literal("response.output_item.added"),
- output_index: z15.number(),
- item: z15.discriminatedUnion("type", [
- z15.object({
- type: z15.literal("message"),
- id: z15.string()
+ var responseOutputItemAddedSchema = z16.object({
+ type: z16.literal("response.output_item.added"),
+ output_index: z16.number(),
+ item: z16.discriminatedUnion("type", [
+ z16.object({
+ type: z16.literal("message"),
+ id: z16.string()
  }),
- z15.object({
- type: z15.literal("reasoning"),
- id: z15.string(),
- encrypted_content: z15.string().nullish()
+ z16.object({
+ type: z16.literal("reasoning"),
+ id: z16.string(),
+ encrypted_content: z16.string().nullish()
  }),
- z15.object({
- type: z15.literal("function_call"),
- id: z15.string(),
- call_id: z15.string(),
- name: z15.string(),
- arguments: z15.string()
+ z16.object({
+ type: z16.literal("function_call"),
+ id: z16.string(),
+ call_id: z16.string(),
+ name: z16.string(),
+ arguments: z16.string()
  }),
- z15.object({
- type: z15.literal("web_search_call"),
- id: z15.string(),
- status: z15.string(),
- action: z15.object({
- type: z15.literal("search"),
- query: z15.string().optional()
+ z16.object({
+ type: z16.literal("web_search_call"),
+ id: z16.string(),
+ status: z16.string(),
+ action: z16.object({
+ type: z16.literal("search"),
+ query: z16.string().optional()
  }).nullish()
  }),
- z15.object({
- type: z15.literal("computer_call"),
- id: z15.string(),
- status: z15.string()
+ z16.object({
+ type: z16.literal("computer_call"),
+ id: z16.string(),
+ status: z16.string()
  }),
- z15.object({
- type: z15.literal("file_search_call"),
- id: z15.string()
+ z16.object({
+ type: z16.literal("file_search_call"),
+ id: z16.string()
  }),
- z15.object({
- type: z15.literal("image_generation_call"),
- id: z15.string()
+ z16.object({
+ type: z16.literal("image_generation_call"),
+ id: z16.string()
+ }),
+ z16.object({
+ type: z16.literal("code_interpreter_call"),
+ id: z16.string(),
+ container_id: z16.string(),
+ code: z16.string().nullable(),
+ outputs: z16.array(
+ z16.discriminatedUnion("type", [
+ z16.object({ type: z16.literal("logs"), logs: z16.string() }),
+ z16.object({ type: z16.literal("image"), url: z16.string() })
+ ])
+ ).nullable(),
+ status: z16.string()
  })
  ])
  });
- var responseOutputItemDoneSchema = z15.object({
- type: z15.literal("response.output_item.done"),
- output_index: z15.number(),
- item: z15.discriminatedUnion("type", [
- z15.object({
- type: z15.literal("message"),
- id: z15.string()
+ var responseOutputItemDoneSchema = z16.object({
+ type: z16.literal("response.output_item.done"),
+ output_index: z16.number(),
+ item: z16.discriminatedUnion("type", [
+ z16.object({
+ type: z16.literal("message"),
+ id: z16.string()
  }),
- z15.object({
- type: z15.literal("reasoning"),
- id: z15.string(),
- encrypted_content: z15.string().nullish()
+ z16.object({
+ type: z16.literal("reasoning"),
+ id: z16.string(),
+ encrypted_content: z16.string().nullish()
  }),
- z15.object({
- type: z15.literal("function_call"),
- id: z15.string(),
- call_id: z15.string(),
- name: z15.string(),
- arguments: z15.string(),
- status: z15.literal("completed")
+ z16.object({
+ type: z16.literal("function_call"),
+ id: z16.string(),
+ call_id: z16.string(),
+ name: z16.string(),
+ arguments: z16.string(),
+ status: z16.literal("completed")
  }),
  codeInterpreterCallItem,
  imageGenerationCallItem,
  webSearchCallItem,
  fileSearchCallItem,
- z15.object({
- type: z15.literal("computer_call"),
- id: z15.string(),
- status: z15.literal("completed")
+ localShellCallItem,
+ z16.object({
+ type: z16.literal("computer_call"),
+ id: z16.string(),
+ status: z16.literal("completed")
  })
  ])
  });
- var responseFunctionCallArgumentsDeltaSchema = z15.object({
- type: z15.literal("response.function_call_arguments.delta"),
- item_id: z15.string(),
- output_index: z15.number(),
- delta: z15.string()
+ var responseFunctionCallArgumentsDeltaSchema = z16.object({
+ type: z16.literal("response.function_call_arguments.delta"),
+ item_id: z16.string(),
+ output_index: z16.number(),
+ delta: z16.string()
+ });
+ var responseCodeInterpreterCallCodeDeltaSchema = z16.object({
+ type: z16.literal("response.code_interpreter_call_code.delta"),
+ item_id: z16.string(),
+ output_index: z16.number(),
+ delta: z16.string()
+ });
+ var responseCodeInterpreterCallCodeDoneSchema = z16.object({
+ type: z16.literal("response.code_interpreter_call_code.done"),
+ item_id: z16.string(),
+ output_index: z16.number(),
+ code: z16.string()
  });
- var responseAnnotationAddedSchema = z15.object({
- type: z15.literal("response.output_text.annotation.added"),
- annotation: z15.discriminatedUnion("type", [
- z15.object({
- type: z15.literal("url_citation"),
- url: z15.string(),
- title: z15.string()
+ var responseAnnotationAddedSchema = z16.object({
+ type: z16.literal("response.output_text.annotation.added"),
+ annotation: z16.discriminatedUnion("type", [
+ z16.object({
+ type: z16.literal("url_citation"),
+ url: z16.string(),
+ title: z16.string()
  }),
- z15.object({
- type: z15.literal("file_citation"),
- file_id: z15.string(),
- filename: z15.string().nullish(),
- index: z15.number().nullish(),
- start_index: z15.number().nullish(),
- end_index: z15.number().nullish(),
- quote: z15.string().nullish()
+ z16.object({
+ type: z16.literal("file_citation"),
+ file_id: z16.string(),
+ filename: z16.string().nullish(),
+ index: z16.number().nullish(),
+ start_index: z16.number().nullish(),
+ end_index: z16.number().nullish(),
+ quote: z16.string().nullish()
  })
  ])
  });
- var responseReasoningSummaryPartAddedSchema = z15.object({
- type: z15.literal("response.reasoning_summary_part.added"),
- item_id: z15.string(),
- summary_index: z15.number()
+ var responseReasoningSummaryPartAddedSchema = z16.object({
+ type: z16.literal("response.reasoning_summary_part.added"),
+ item_id: z16.string(),
+ summary_index: z16.number()
  });
- var responseReasoningSummaryTextDeltaSchema = z15.object({
- type: z15.literal("response.reasoning_summary_text.delta"),
- item_id: z15.string(),
- summary_index: z15.number(),
- delta: z15.string()
+ var responseReasoningSummaryTextDeltaSchema = z16.object({
+ type: z16.literal("response.reasoning_summary_text.delta"),
+ item_id: z16.string(),
+ summary_index: z16.number(),
+ delta: z16.string()
  });
- var openaiResponsesChunkSchema = z15.union([
+ var openaiResponsesChunkSchema = z16.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
  responseOutputItemAddedSchema,
  responseOutputItemDoneSchema,
  responseFunctionCallArgumentsDeltaSchema,
+ responseCodeInterpreterCallCodeDeltaSchema,
+ responseCodeInterpreterCallCodeDoneSchema,
  responseAnnotationAddedSchema,
  responseReasoningSummaryPartAddedSchema,
  responseReasoningSummaryTextDeltaSchema,
  errorChunkSchema,
- z15.object({ type: z15.string() }).loose()
+ z16.object({ type: z16.string() }).loose()
  // fallback for unknown chunks
  ]);
  function isTextDeltaChunk(chunk) {
@@ -3466,6 +3666,12 @@ function isResponseCreatedChunk(chunk) {
  function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
  return chunk.type === "response.function_call_arguments.delta";
  }
+ function isResponseCodeInterpreterCallCodeDeltaChunk(chunk) {
+ return chunk.type === "response.code_interpreter_call_code.delta";
+ }
+ function isResponseCodeInterpreterCallCodeDoneChunk(chunk) {
+ return chunk.type === "response.code_interpreter_call_code.done";
+ }
  function isResponseOutputItemAddedChunk(chunk) {
  return chunk.type === "response.output_item.added";
  }
@@ -3518,15 +3724,15 @@ function getResponsesModelConfig(modelId) {
  isReasoningModel: false
  };
  }
- var openaiResponsesProviderOptionsSchema = z15.object({
- include: z15.array(
- z15.enum([
+ var openaiResponsesProviderOptionsSchema = z16.object({
+ include: z16.array(
+ z16.enum([
  "reasoning.encrypted_content",
  "file_search_call.results",
  "message.output_text.logprobs"
  ])
  ).nullish(),
- instructions: z15.string().nullish(),
+ instructions: z16.string().nullish(),
  /**
  * Return the log probabilities of the tokens.
  *
@@ -3539,25 +3745,25 @@ var openaiResponsesProviderOptionsSchema = z15.object({
  * @see https://platform.openai.com/docs/api-reference/responses/create
  * @see https://cookbook.openai.com/examples/using_logprobs
  */
- logprobs: z15.union([z15.boolean(), z15.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
+ logprobs: z16.union([z16.boolean(), z16.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
  /**
  * The maximum number of total calls to built-in tools that can be processed in a response.
  * This maximum number applies across all built-in tool calls, not per individual tool.
  * Any further attempts to call a tool by the model will be ignored.
  */
- maxToolCalls: z15.number().nullish(),
- metadata: z15.any().nullish(),
- parallelToolCalls: z15.boolean().nullish(),
- previousResponseId: z15.string().nullish(),
- promptCacheKey: z15.string().nullish(),
- reasoningEffort: z15.string().nullish(),
- reasoningSummary: z15.string().nullish(),
- safetyIdentifier: z15.string().nullish(),
- serviceTier: z15.enum(["auto", "flex", "priority"]).nullish(),
- store: z15.boolean().nullish(),
- strictJsonSchema: z15.boolean().nullish(),
- textVerbosity: z15.enum(["low", "medium", "high"]).nullish(),
- user: z15.string().nullish()
+ maxToolCalls: z16.number().nullish(),
+ metadata: z16.any().nullish(),
+ parallelToolCalls: z16.boolean().nullish(),
+ previousResponseId: z16.string().nullish(),
+ promptCacheKey: z16.string().nullish(),
+ reasoningEffort: z16.string().nullish(),
+ reasoningSummary: z16.string().nullish(),
+ safetyIdentifier: z16.string().nullish(),
+ serviceTier: z16.enum(["auto", "flex", "priority"]).nullish(),
+ store: z16.boolean().nullish(),
+ strictJsonSchema: z16.boolean().nullish(),
+ textVerbosity: z16.enum(["low", "medium", "high"]).nullish(),
+ user: z16.string().nullish()
  });
 
  // src/speech/openai-speech-model.ts
@@ -3567,10 +3773,10 @@ import {
  parseProviderOptions as parseProviderOptions6,
  postJsonToApi as postJsonToApi6
  } from "@ai-sdk/provider-utils";
- import { z as z16 } from "zod/v4";
- var OpenAIProviderOptionsSchema = z16.object({
- instructions: z16.string().nullish(),
- speed: z16.number().min(0.25).max(4).default(1).nullish()
+ import { z as z17 } from "zod/v4";
+ var OpenAIProviderOptionsSchema = z17.object({
+ instructions: z17.string().nullish(),
+ speed: z17.number().min(0.25).max(4).default(1).nullish()
  });
  var OpenAISpeechModel = class {
  constructor(modelId, config) {
@@ -3681,33 +3887,33 @@ import {
  parseProviderOptions as parseProviderOptions7,
  postFormDataToApi
  } from "@ai-sdk/provider-utils";
- import { z as z18 } from "zod/v4";
+ import { z as z19 } from "zod/v4";
 
  // src/transcription/openai-transcription-options.ts
- import { z as z17 } from "zod/v4";
- var openAITranscriptionProviderOptions = z17.object({
+ import { z as z18 } from "zod/v4";
+ var openAITranscriptionProviderOptions = z18.object({
  /**
  * Additional information to include in the transcription response.
  */
- include: z17.array(z17.string()).optional(),
+ include: z18.array(z18.string()).optional(),
  /**
  * The language of the input audio in ISO-639-1 format.
  */
- language: z17.string().optional(),
+ language: z18.string().optional(),
  /**
  * An optional text to guide the model's style or continue a previous audio segment.
  */
- prompt: z17.string().optional(),
+ prompt: z18.string().optional(),
  /**
  * The sampling temperature, between 0 and 1.
  * @default 0
  */
- temperature: z17.number().min(0).max(1).default(0).optional(),
+ temperature: z18.number().min(0).max(1).default(0).optional(),
  /**
  * The timestamp granularities to populate for this transcription.
  * @default ['segment']
  */
- timestampGranularities: z17.array(z17.enum(["word", "segment"])).default(["segment"]).optional()
+ timestampGranularities: z18.array(z18.enum(["word", "segment"])).default(["segment"]).optional()
  });
 
  // src/transcription/openai-transcription-model.ts
@@ -3876,48 +4082,59 @@ var OpenAITranscriptionModel = class {
  };
  }
  };
- var openaiTranscriptionResponseSchema = z18.object({
- text: z18.string(),
- language: z18.string().nullish(),
- duration: z18.number().nullish(),
- words: z18.array(
- z18.object({
- word: z18.string(),
- start: z18.number(),
- end: z18.number()
+ var openaiTranscriptionResponseSchema = z19.object({
+ text: z19.string(),
+ language: z19.string().nullish(),
+ duration: z19.number().nullish(),
+ words: z19.array(
+ z19.object({
+ word: z19.string(),
+ start: z19.number(),
+ end: z19.number()
  })
  ).nullish(),
- segments: z18.array(
- z18.object({
- id: z18.number(),
- seek: z18.number(),
- start: z18.number(),
- end: z18.number(),
- text: z18.string(),
- tokens: z18.array(z18.number()),
- temperature: z18.number(),
- avg_logprob: z18.number(),
- compression_ratio: z18.number(),
- no_speech_prob: z18.number()
+ segments: z19.array(
+ z19.object({
+ id: z19.number(),
+ seek: z19.number(),
+ start: z19.number(),
+ end: z19.number(),
+ text: z19.string(),
+ tokens: z19.array(z19.number()),
+ temperature: z19.number(),
+ avg_logprob: z19.number(),
+ compression_ratio: z19.number(),
+ no_speech_prob: z19.number()
  })
  ).nullish()
  });
 
+ // src/version.ts
+ var VERSION = true ? "2.1.0-beta.10" : "0.0.0-test";
+
  // src/openai-provider.ts
  function createOpenAI(options = {}) {
  var _a, _b;
- const baseURL = (_a = withoutTrailingSlash(options.baseURL)) != null ? _a : "https://api.openai.com/v1";
+ const baseURL = (_a = withoutTrailingSlash(
+ loadOptionalSetting({
+ settingValue: options.baseURL,
+ environmentVariableName: "OPENAI_BASE_URL"
+ })
+ )) != null ? _a : "https://api.openai.com/v1";
  const providerName = (_b = options.name) != null ? _b : "openai";
- const getHeaders = () => ({
- Authorization: `Bearer ${loadApiKey({
- apiKey: options.apiKey,
- environmentVariableName: "OPENAI_API_KEY",
- description: "OpenAI"
- })}`,
- "OpenAI-Organization": options.organization,
- "OpenAI-Project": options.project,
- ...options.headers
- });
+ const getHeaders = () => withUserAgentSuffix(
+ {
+ Authorization: `Bearer ${loadApiKey({
+ apiKey: options.apiKey,
+ environmentVariableName: "OPENAI_API_KEY",
+ description: "OpenAI"
+ })}`,
+ "OpenAI-Organization": options.organization,
+ "OpenAI-Project": options.project,
+ ...options.headers
+ },
+ `ai-sdk/openai/${VERSION}`
+ );
  const createChatModel = (modelId) => new OpenAIChatLanguageModel(modelId, {
  provider: `${providerName}.chat`,
  url: ({ path }) => `${baseURL}${path}`,
@@ -3992,6 +4209,7 @@ function createOpenAI(options = {}) {
  }
  var openai = createOpenAI();
  export {
+ VERSION,
  createOpenAI,
  openai
  };
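
Two behavioral notes on the provider setup changes above: the default base URL can now come from the `OPENAI_BASE_URL` environment variable (an explicit `baseURL` option still takes precedence), and request headers gain an `ai-sdk/openai/<VERSION>` user-agent suffix via `withUserAgentSuffix`. A minimal sketch (proxy URL invented):

```ts
import { createOpenAI } from "@ai-sdk/openai";

// picked up when options.baseURL is not provided
process.env.OPENAI_BASE_URL = "https://my-openai-proxy.example.com/v1";

const openai = createOpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});
// requests go to the proxy and carry a user-agent suffix such as "ai-sdk/openai/2.1.0-beta.10"
```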