@zenning/openai 2.1.0 → 2.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -1,8 +1,10 @@
1
1
  // src/openai-provider.ts
2
2
  import {
3
3
  loadApiKey,
4
- withoutTrailingSlash
5
- } from "@ai-sdk/provider-utils";
4
+ loadOptionalSetting,
5
+ withoutTrailingSlash,
6
+ withUserAgentSuffix
7
+ } from "@zenning/provider-utils";
6
8
 
7
9
  // src/chat/openai-chat-language-model.ts
8
10
  import {
@@ -16,12 +18,12 @@ import {
16
18
  isParsableJson,
17
19
  parseProviderOptions,
18
20
  postJsonToApi
19
- } from "@ai-sdk/provider-utils";
21
+ } from "@zenning/provider-utils";
20
22
  import { z as z3 } from "zod/v4";
21
23
 
22
24
  // src/openai-error.ts
23
25
  import { z } from "zod/v4";
24
- import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
26
+ import { createJsonErrorResponseHandler } from "@zenning/provider-utils";
25
27
  var openaiErrorDataSchema = z.object({
26
28
  error: z.object({
27
29
  message: z.string(),
@@ -42,7 +44,7 @@ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
42
44
  import {
43
45
  UnsupportedFunctionalityError
44
46
  } from "@zenning/provider";
45
- import { convertToBase64 } from "@ai-sdk/provider-utils";
47
+ import { convertToBase64 } from "@zenning/provider-utils";
46
48
  function convertToOpenAIChatMessages({
47
49
  prompt,
48
50
  systemMessageMode = "system"
@@ -250,7 +252,7 @@ function mapOpenAIFinishReason(finishReason) {
250
252
 
251
253
  // src/chat/openai-chat-options.ts
252
254
  import { z as z2 } from "zod/v4";
253
- var openaiProviderOptions = z2.object({
255
+ var openaiChatLanguageModelOptions = z2.object({
254
256
  /**
255
257
  * Modify the likelihood of specified tokens appearing in the completion.
256
258
  *
@@ -404,7 +406,7 @@ function prepareChatTools({
404
406
  // src/chat/openai-chat-language-model.ts
405
407
  var OpenAIChatLanguageModel = class {
406
408
  constructor(modelId, config) {
407
- this.specificationVersion = "v2";
409
+ this.specificationVersion = "v3";
408
410
  this.supportedUrls = {
409
411
  "image/*": [/^https?:\/\/.*$/]
410
412
  };
@@ -434,7 +436,7 @@ var OpenAIChatLanguageModel = class {
434
436
  const openaiOptions = (_a = await parseProviderOptions({
435
437
  provider: "openai",
436
438
  providerOptions,
437
- schema: openaiProviderOptions
439
+ schema: openaiChatLanguageModelOptions
438
440
  })) != null ? _a : {};
439
441
  const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
440
442
  if (topK != null) {
@@ -1065,7 +1067,7 @@ import {
1065
1067
  createJsonResponseHandler as createJsonResponseHandler2,
1066
1068
  parseProviderOptions as parseProviderOptions2,
1067
1069
  postJsonToApi as postJsonToApi2
1068
- } from "@ai-sdk/provider-utils";
1070
+ } from "@zenning/provider-utils";
1069
1071
  import { z as z5 } from "zod/v4";
1070
1072
 
1071
1073
  // src/completion/convert-to-openai-completion-prompt.ts
@@ -1222,7 +1224,7 @@ var openaiCompletionProviderOptions = z4.object({
1222
1224
  // src/completion/openai-completion-language-model.ts
1223
1225
  var OpenAICompletionLanguageModel = class {
1224
1226
  constructor(modelId, config) {
1225
- this.specificationVersion = "v2";
1227
+ this.specificationVersion = "v3";
1226
1228
  this.supportedUrls = {
1227
1229
  // No URLs are supported for completion models.
1228
1230
  };
@@ -1501,7 +1503,7 @@ import {
1501
1503
  createJsonResponseHandler as createJsonResponseHandler3,
1502
1504
  parseProviderOptions as parseProviderOptions3,
1503
1505
  postJsonToApi as postJsonToApi3
1504
- } from "@ai-sdk/provider-utils";
1506
+ } from "@zenning/provider-utils";
1505
1507
  import { z as z7 } from "zod/v4";
1506
1508
 
1507
1509
  // src/embedding/openai-embedding-options.ts
@@ -1522,7 +1524,7 @@ var openaiEmbeddingProviderOptions = z6.object({
1522
1524
  // src/embedding/openai-embedding-model.ts
1523
1525
  var OpenAIEmbeddingModel = class {
1524
1526
  constructor(modelId, config) {
1525
- this.specificationVersion = "v2";
1527
+ this.specificationVersion = "v3";
1526
1528
  this.maxEmbeddingsPerCall = 2048;
1527
1529
  this.supportsParallelCalls = true;
1528
1530
  this.modelId = modelId;
@@ -1592,7 +1594,7 @@ import {
1592
1594
  combineHeaders as combineHeaders4,
1593
1595
  createJsonResponseHandler as createJsonResponseHandler4,
1594
1596
  postJsonToApi as postJsonToApi4
1595
- } from "@ai-sdk/provider-utils";
1597
+ } from "@zenning/provider-utils";
1596
1598
  import { z as z8 } from "zod/v4";
1597
1599
 
1598
1600
  // src/image/openai-image-options.ts
@@ -1608,7 +1610,7 @@ var OpenAIImageModel = class {
1608
1610
  constructor(modelId, config) {
1609
1611
  this.modelId = modelId;
1610
1612
  this.config = config;
1611
- this.specificationVersion = "v2";
1613
+ this.specificationVersion = "v3";
1612
1614
  }
1613
1615
  get maxImagesPerCall() {
1614
1616
  var _a;
@@ -1688,7 +1690,7 @@ var openaiImageResponseSchema = z8.object({
1688
1690
  });
1689
1691
 
1690
1692
  // src/tool/code-interpreter.ts
1691
- import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils";
1693
+ import { createProviderDefinedToolFactoryWithOutputSchema } from "@zenning/provider-utils";
1692
1694
  import { z as z9 } from "zod/v4";
1693
1695
  var codeInterpreterInputSchema = z9.object({
1694
1696
  code: z9.string().nullish(),
@@ -1721,7 +1723,7 @@ var codeInterpreter = (args = {}) => {
1721
1723
  };
1722
1724
 
1723
1725
  // src/tool/file-search.ts
1724
- import { createProviderDefinedToolFactoryWithOutputSchema as createProviderDefinedToolFactoryWithOutputSchema2 } from "@ai-sdk/provider-utils";
1726
+ import { createProviderDefinedToolFactoryWithOutputSchema as createProviderDefinedToolFactoryWithOutputSchema2 } from "@zenning/provider-utils";
1725
1727
  import { z as z10 } from "zod/v4";
1726
1728
  var comparisonFilterSchema = z10.object({
1727
1729
  key: z10.string(),
@@ -1763,7 +1765,7 @@ var fileSearch = createProviderDefinedToolFactoryWithOutputSchema2({
1763
1765
  });
1764
1766
 
1765
1767
  // src/tool/image-generation.ts
1766
- import { createProviderDefinedToolFactoryWithOutputSchema as createProviderDefinedToolFactoryWithOutputSchema3 } from "@ai-sdk/provider-utils";
1768
+ import { createProviderDefinedToolFactoryWithOutputSchema as createProviderDefinedToolFactoryWithOutputSchema3 } from "@zenning/provider-utils";
1767
1769
  import { z as z11 } from "zod/v4";
1768
1770
  var imageGenerationArgsSchema = z11.object({
1769
1771
  background: z11.enum(["auto", "opaque", "transparent"]).optional(),
@@ -1776,6 +1778,7 @@ var imageGenerationArgsSchema = z11.object({
1776
1778
  moderation: z11.enum(["auto"]).optional(),
1777
1779
  outputCompression: z11.number().int().min(0).max(100).optional(),
1778
1780
  outputFormat: z11.enum(["png", "jpeg", "webp"]).optional(),
1781
+ partialImages: z11.number().int().min(0).max(3).optional(),
1779
1782
  quality: z11.enum(["auto", "low", "medium", "high"]).optional(),
1780
1783
  size: z11.enum(["1024x1024", "1024x1536", "1536x1024", "auto"]).optional()
1781
1784
  }).strict();
@@ -1792,39 +1795,62 @@ var imageGeneration = (args = {}) => {
1792
1795
  return imageGenerationToolFactory(args);
1793
1796
  };
1794
1797
 
1795
- // src/tool/web-search.ts
1796
- import { createProviderDefinedToolFactory } from "@ai-sdk/provider-utils";
1798
+ // src/tool/local-shell.ts
1799
+ import { createProviderDefinedToolFactoryWithOutputSchema as createProviderDefinedToolFactoryWithOutputSchema4 } from "@zenning/provider-utils";
1797
1800
  import { z as z12 } from "zod/v4";
1798
- var webSearchArgsSchema = z12.object({
1799
- filters: z12.object({
1800
- allowedDomains: z12.array(z12.string()).optional()
1801
+ var localShellInputSchema = z12.object({
1802
+ action: z12.object({
1803
+ type: z12.literal("exec"),
1804
+ command: z12.array(z12.string()),
1805
+ timeoutMs: z12.number().optional(),
1806
+ user: z12.string().optional(),
1807
+ workingDirectory: z12.string().optional(),
1808
+ env: z12.record(z12.string(), z12.string()).optional()
1809
+ })
1810
+ });
1811
+ var localShellOutputSchema = z12.object({
1812
+ output: z12.string()
1813
+ });
1814
+ var localShell = createProviderDefinedToolFactoryWithOutputSchema4({
1815
+ id: "openai.local_shell",
1816
+ name: "local_shell",
1817
+ inputSchema: localShellInputSchema,
1818
+ outputSchema: localShellOutputSchema
1819
+ });
1820
+
1821
+ // src/tool/web-search.ts
1822
+ import { createProviderDefinedToolFactory } from "@zenning/provider-utils";
1823
+ import { z as z13 } from "zod/v4";
1824
+ var webSearchArgsSchema = z13.object({
1825
+ filters: z13.object({
1826
+ allowedDomains: z13.array(z13.string()).optional()
1801
1827
  }).optional(),
1802
- searchContextSize: z12.enum(["low", "medium", "high"]).optional(),
1803
- userLocation: z12.object({
1804
- type: z12.literal("approximate"),
1805
- country: z12.string().optional(),
1806
- city: z12.string().optional(),
1807
- region: z12.string().optional(),
1808
- timezone: z12.string().optional()
1828
+ searchContextSize: z13.enum(["low", "medium", "high"]).optional(),
1829
+ userLocation: z13.object({
1830
+ type: z13.literal("approximate"),
1831
+ country: z13.string().optional(),
1832
+ city: z13.string().optional(),
1833
+ region: z13.string().optional(),
1834
+ timezone: z13.string().optional()
1809
1835
  }).optional()
1810
1836
  });
1811
1837
  var webSearchToolFactory = createProviderDefinedToolFactory({
1812
1838
  id: "openai.web_search",
1813
1839
  name: "web_search",
1814
- inputSchema: z12.object({
1815
- action: z12.discriminatedUnion("type", [
1816
- z12.object({
1817
- type: z12.literal("search"),
1818
- query: z12.string().nullish()
1840
+ inputSchema: z13.object({
1841
+ action: z13.discriminatedUnion("type", [
1842
+ z13.object({
1843
+ type: z13.literal("search"),
1844
+ query: z13.string().nullish()
1819
1845
  }),
1820
- z12.object({
1821
- type: z12.literal("open_page"),
1822
- url: z12.string()
1846
+ z13.object({
1847
+ type: z13.literal("open_page"),
1848
+ url: z13.string()
1823
1849
  }),
1824
- z12.object({
1825
- type: z12.literal("find"),
1826
- url: z12.string(),
1827
- pattern: z12.string()
1850
+ z13.object({
1851
+ type: z13.literal("find"),
1852
+ url: z13.string(),
1853
+ pattern: z13.string()
1828
1854
  })
1829
1855
  ]).nullish()
1830
1856
  })
@@ -1834,59 +1860,59 @@ var webSearch = (args = {}) => {
1834
1860
  };
1835
1861
 
1836
1862
  // src/tool/web-search-preview.ts
1837
- import { createProviderDefinedToolFactory as createProviderDefinedToolFactory2 } from "@ai-sdk/provider-utils";
1838
- import { z as z13 } from "zod/v4";
1839
- var webSearchPreviewArgsSchema = z13.object({
1863
+ import { createProviderDefinedToolFactory as createProviderDefinedToolFactory2 } from "@zenning/provider-utils";
1864
+ import { z as z14 } from "zod/v4";
1865
+ var webSearchPreviewArgsSchema = z14.object({
1840
1866
  /**
1841
1867
  * Search context size to use for the web search.
1842
1868
  * - high: Most comprehensive context, highest cost, slower response
1843
1869
  * - medium: Balanced context, cost, and latency (default)
1844
1870
  * - low: Least context, lowest cost, fastest response
1845
1871
  */
1846
- searchContextSize: z13.enum(["low", "medium", "high"]).optional(),
1872
+ searchContextSize: z14.enum(["low", "medium", "high"]).optional(),
1847
1873
  /**
1848
1874
  * User location information to provide geographically relevant search results.
1849
1875
  */
1850
- userLocation: z13.object({
1876
+ userLocation: z14.object({
1851
1877
  /**
1852
1878
  * Type of location (always 'approximate')
1853
1879
  */
1854
- type: z13.literal("approximate"),
1880
+ type: z14.literal("approximate"),
1855
1881
  /**
1856
1882
  * Two-letter ISO country code (e.g., 'US', 'GB')
1857
1883
  */
1858
- country: z13.string().optional(),
1884
+ country: z14.string().optional(),
1859
1885
  /**
1860
1886
  * City name (free text, e.g., 'Minneapolis')
1861
1887
  */
1862
- city: z13.string().optional(),
1888
+ city: z14.string().optional(),
1863
1889
  /**
1864
1890
  * Region name (free text, e.g., 'Minnesota')
1865
1891
  */
1866
- region: z13.string().optional(),
1892
+ region: z14.string().optional(),
1867
1893
  /**
1868
1894
  * IANA timezone (e.g., 'America/Chicago')
1869
1895
  */
1870
- timezone: z13.string().optional()
1896
+ timezone: z14.string().optional()
1871
1897
  }).optional()
1872
1898
  });
1873
1899
  var webSearchPreview = createProviderDefinedToolFactory2({
1874
1900
  id: "openai.web_search_preview",
1875
1901
  name: "web_search_preview",
1876
- inputSchema: z13.object({
1877
- action: z13.discriminatedUnion("type", [
1878
- z13.object({
1879
- type: z13.literal("search"),
1880
- query: z13.string().nullish()
1902
+ inputSchema: z14.object({
1903
+ action: z14.discriminatedUnion("type", [
1904
+ z14.object({
1905
+ type: z14.literal("search"),
1906
+ query: z14.string().nullish()
1881
1907
  }),
1882
- z13.object({
1883
- type: z13.literal("open_page"),
1884
- url: z13.string()
1908
+ z14.object({
1909
+ type: z14.literal("open_page"),
1910
+ url: z14.string()
1885
1911
  }),
1886
- z13.object({
1887
- type: z13.literal("find"),
1888
- url: z13.string(),
1889
- pattern: z13.string()
1912
+ z14.object({
1913
+ type: z14.literal("find"),
1914
+ url: z14.string(),
1915
+ pattern: z14.string()
1890
1916
  })
1891
1917
  ]).nullish()
1892
1918
  })
@@ -1924,13 +1950,27 @@ var openaiTools = {
1924
1950
  *
1925
1951
  * Must have name `image_generation`.
1926
1952
  *
1927
- * @param size - Image dimensions (e.g., 1024x1024, 1024x1536)
1928
- * @param quality - Rendering quality (e.g. low, medium, high)
1929
- * @param format - File output format
1930
- * @param compression - Compression level (0-100%) for JPEG and WebP formats
1931
- * @param background - Transparent or opaque
1953
+ * @param background - Background type for the generated image. One of 'auto', 'opaque', or 'transparent'.
1954
+ * @param inputFidelity - Input fidelity for the generated image. One of 'low' or 'high'.
1955
+ * @param inputImageMask - Optional mask for inpainting. Contains fileId and/or imageUrl.
1956
+ * @param model - The image generation model to use. Default: gpt-image-1.
1957
+ * @param moderation - Moderation level for the generated image. Default: 'auto'.
1958
+ * @param outputCompression - Compression level for the output image (0-100).
1959
+ * @param outputFormat - The output format of the generated image. One of 'png', 'jpeg', or 'webp'.
1960
+ * @param partialImages - Number of partial images to generate in streaming mode (0-3).
1961
+ * @param quality - The quality of the generated image. One of 'auto', 'low', 'medium', or 'high'.
1962
+ * @param size - The size of the generated image. One of 'auto', '1024x1024', '1024x1536', or '1536x1024'.
1932
1963
  */
1933
1964
  imageGeneration,
1965
+ /**
1966
+ * Local shell is a tool that allows agents to run shell commands locally
1967
+ * on a machine you or the user provides.
1968
+ *
1969
+ * Supported models: `gpt-5-codex` and `codex-mini-latest`
1970
+ *
1971
+ * Must have name `local_shell`.
1972
+ */
1973
+ localShell,
1934
1974
  /**
1935
1975
  * Web search allows models to access up-to-date information from the internet
1936
1976
  * and provide answers with sourced citations.
@@ -1967,15 +2007,15 @@ import {
1967
2007
  generateId as generateId2,
1968
2008
  parseProviderOptions as parseProviderOptions5,
1969
2009
  postJsonToApi as postJsonToApi5
1970
- } from "@ai-sdk/provider-utils";
1971
- import { z as z15 } from "zod/v4";
2010
+ } from "@zenning/provider-utils";
2011
+ import { z as z16 } from "zod/v4";
1972
2012
 
1973
2013
  // src/responses/convert-to-openai-responses-input.ts
1974
2014
  import {
1975
2015
  UnsupportedFunctionalityError as UnsupportedFunctionalityError4
1976
2016
  } from "@zenning/provider";
1977
- import { convertToBase64 as convertToBase642, parseProviderOptions as parseProviderOptions4 } from "@ai-sdk/provider-utils";
1978
- import { z as z14 } from "zod/v4";
2017
+ import { convertToBase64 as convertToBase642, parseProviderOptions as parseProviderOptions4 } from "@zenning/provider-utils";
2018
+ import { z as z15 } from "zod/v4";
1979
2019
  function isFileId(data, prefixes) {
1980
2020
  if (!prefixes) return false;
1981
2021
  return prefixes.some((prefix) => data.startsWith(prefix));
@@ -1984,9 +2024,10 @@ async function convertToOpenAIResponsesInput({
1984
2024
  prompt,
1985
2025
  systemMessageMode,
1986
2026
  fileIdPrefixes,
1987
- store
2027
+ store,
2028
+ hasLocalShellTool = false
1988
2029
  }) {
1989
- var _a, _b, _c, _d, _e, _f;
2030
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
1990
2031
  const input = [];
1991
2032
  const warnings = [];
1992
2033
  for (const { role, content } of prompt) {
@@ -2079,12 +2120,29 @@ async function convertToOpenAIResponsesInput({
2079
2120
  if (part.providerExecuted) {
2080
2121
  break;
2081
2122
  }
2123
+ if (hasLocalShellTool && part.toolName === "local_shell") {
2124
+ const parsedInput = localShellInputSchema.parse(part.input);
2125
+ input.push({
2126
+ type: "local_shell_call",
2127
+ call_id: part.toolCallId,
2128
+ id: (_f = (_e = (_d = part.providerOptions) == null ? void 0 : _d.openai) == null ? void 0 : _e.itemId) != null ? _f : void 0,
2129
+ action: {
2130
+ type: "exec",
2131
+ command: parsedInput.action.command,
2132
+ timeout_ms: parsedInput.action.timeoutMs,
2133
+ user: parsedInput.action.user,
2134
+ working_directory: parsedInput.action.workingDirectory,
2135
+ env: parsedInput.action.env
2136
+ }
2137
+ });
2138
+ break;
2139
+ }
2082
2140
  input.push({
2083
2141
  type: "function_call",
2084
2142
  call_id: part.toolCallId,
2085
2143
  name: part.toolName,
2086
2144
  arguments: JSON.stringify(part.input),
2087
- id: (_f = (_e = (_d = part.providerOptions) == null ? void 0 : _d.openai) == null ? void 0 : _e.itemId) != null ? _f : void 0
2145
+ id: (_i = (_h = (_g = part.providerOptions) == null ? void 0 : _g.openai) == null ? void 0 : _h.itemId) != null ? _i : void 0
2088
2146
  });
2089
2147
  break;
2090
2148
  }
@@ -2108,26 +2166,40 @@ async function convertToOpenAIResponsesInput({
2108
2166
  });
2109
2167
  const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
2110
2168
  if (reasoningId != null) {
2111
- const existingReasoningMessage = reasoningMessages[reasoningId];
2112
- const summaryParts = [];
2113
- if (part.text.length > 0) {
2114
- summaryParts.push({ type: "summary_text", text: part.text });
2115
- } else if (existingReasoningMessage !== void 0) {
2116
- warnings.push({
2117
- type: "other",
2118
- message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
2119
- });
2120
- }
2121
- if (existingReasoningMessage === void 0) {
2122
- reasoningMessages[reasoningId] = {
2123
- type: "reasoning",
2124
- id: reasoningId,
2125
- encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
2126
- summary: summaryParts
2127
- };
2128
- input.push(reasoningMessages[reasoningId]);
2169
+ const reasoningMessage = reasoningMessages[reasoningId];
2170
+ if (store) {
2171
+ if (reasoningMessage === void 0) {
2172
+ input.push({ type: "item_reference", id: reasoningId });
2173
+ reasoningMessages[reasoningId] = {
2174
+ type: "reasoning",
2175
+ id: reasoningId,
2176
+ summary: []
2177
+ };
2178
+ }
2129
2179
  } else {
2130
- existingReasoningMessage.summary.push(...summaryParts);
2180
+ const summaryParts = [];
2181
+ if (part.text.length > 0) {
2182
+ summaryParts.push({
2183
+ type: "summary_text",
2184
+ text: part.text
2185
+ });
2186
+ } else if (reasoningMessage !== void 0) {
2187
+ warnings.push({
2188
+ type: "other",
2189
+ message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
2190
+ });
2191
+ }
2192
+ if (reasoningMessage === void 0) {
2193
+ reasoningMessages[reasoningId] = {
2194
+ type: "reasoning",
2195
+ id: reasoningId,
2196
+ encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
2197
+ summary: summaryParts
2198
+ };
2199
+ input.push(reasoningMessages[reasoningId]);
2200
+ } else {
2201
+ reasoningMessage.summary.push(...summaryParts);
2202
+ }
2131
2203
  }
2132
2204
  } else {
2133
2205
  warnings.push({
@@ -2144,6 +2216,14 @@ async function convertToOpenAIResponsesInput({
2144
2216
  case "tool": {
2145
2217
  for (const part of content) {
2146
2218
  const output = part.output;
2219
+ if (hasLocalShellTool && part.toolName === "local_shell" && output.type === "json") {
2220
+ input.push({
2221
+ type: "local_shell_call_output",
2222
+ call_id: part.toolCallId,
2223
+ output: localShellOutputSchema.parse(output.value).output
2224
+ });
2225
+ break;
2226
+ }
2147
2227
  let contentValue;
2148
2228
  switch (output.type) {
2149
2229
  case "text":
@@ -2172,9 +2252,9 @@ async function convertToOpenAIResponsesInput({
2172
2252
  }
2173
2253
  return { input, warnings };
2174
2254
  }
2175
- var openaiResponsesReasoningProviderOptionsSchema = z14.object({
2176
- itemId: z14.string().nullish(),
2177
- reasoningEncryptedContent: z14.string().nullish()
2255
+ var openaiResponsesReasoningProviderOptionsSchema = z15.object({
2256
+ itemId: z15.string().nullish(),
2257
+ reasoningEncryptedContent: z15.string().nullish()
2178
2258
  });
2179
2259
 
2180
2260
  // src/responses/map-openai-responses-finish-reason.ts
@@ -2237,6 +2317,12 @@ function prepareResponsesTools({
2237
2317
  });
2238
2318
  break;
2239
2319
  }
2320
+ case "openai.local_shell": {
2321
+ openaiTools2.push({
2322
+ type: "local_shell"
2323
+ });
2324
+ break;
2325
+ }
2240
2326
  case "openai.web_search_preview": {
2241
2327
  const args = webSearchPreviewArgsSchema.parse(tool.args);
2242
2328
  openaiTools2.push({
@@ -2275,11 +2361,12 @@ function prepareResponsesTools({
2275
2361
  image_url: args.inputImageMask.imageUrl
2276
2362
  } : void 0,
2277
2363
  model: args.model,
2278
- size: args.size,
2279
- quality: args.quality,
2280
2364
  moderation: args.moderation,
2365
+ partial_images: args.partialImages,
2366
+ quality: args.quality,
2367
+ output_compression: args.outputCompression,
2281
2368
  output_format: args.outputFormat,
2282
- output_compression: args.outputCompression
2369
+ size: args.size
2283
2370
  });
2284
2371
  break;
2285
2372
  }
@@ -2316,73 +2403,91 @@ function prepareResponsesTools({
2316
2403
  }
2317
2404
 
2318
2405
  // src/responses/openai-responses-language-model.ts
2319
- var webSearchCallItem = z15.object({
2320
- type: z15.literal("web_search_call"),
2321
- id: z15.string(),
2322
- status: z15.string(),
2323
- action: z15.discriminatedUnion("type", [
2324
- z15.object({
2325
- type: z15.literal("search"),
2326
- query: z15.string().nullish()
2406
+ var webSearchCallItem = z16.object({
2407
+ type: z16.literal("web_search_call"),
2408
+ id: z16.string(),
2409
+ status: z16.string(),
2410
+ action: z16.discriminatedUnion("type", [
2411
+ z16.object({
2412
+ type: z16.literal("search"),
2413
+ query: z16.string().nullish()
2327
2414
  }),
2328
- z15.object({
2329
- type: z15.literal("open_page"),
2330
- url: z15.string()
2415
+ z16.object({
2416
+ type: z16.literal("open_page"),
2417
+ url: z16.string()
2331
2418
  }),
2332
- z15.object({
2333
- type: z15.literal("find"),
2334
- url: z15.string(),
2335
- pattern: z15.string()
2419
+ z16.object({
2420
+ type: z16.literal("find"),
2421
+ url: z16.string(),
2422
+ pattern: z16.string()
2336
2423
  })
2337
2424
  ]).nullish()
2338
2425
  });
2339
- var fileSearchCallItem = z15.object({
2340
- type: z15.literal("file_search_call"),
2341
- id: z15.string(),
2342
- queries: z15.array(z15.string()),
2343
- results: z15.array(
2344
- z15.object({
2345
- attributes: z15.record(z15.string(), z15.unknown()),
2346
- file_id: z15.string(),
2347
- filename: z15.string(),
2348
- score: z15.number(),
2349
- text: z15.string()
2426
+ var fileSearchCallItem = z16.object({
2427
+ type: z16.literal("file_search_call"),
2428
+ id: z16.string(),
2429
+ queries: z16.array(z16.string()),
2430
+ results: z16.array(
2431
+ z16.object({
2432
+ attributes: z16.record(z16.string(), z16.unknown()),
2433
+ file_id: z16.string(),
2434
+ filename: z16.string(),
2435
+ score: z16.number(),
2436
+ text: z16.string()
2350
2437
  })
2351
2438
  ).nullish()
2352
2439
  });
2353
- var codeInterpreterCallItem = z15.object({
2354
- type: z15.literal("code_interpreter_call"),
2355
- id: z15.string(),
2356
- code: z15.string().nullable(),
2357
- container_id: z15.string(),
2358
- outputs: z15.array(
2359
- z15.discriminatedUnion("type", [
2360
- z15.object({ type: z15.literal("logs"), logs: z15.string() }),
2361
- z15.object({ type: z15.literal("image"), url: z15.string() })
2440
+ var codeInterpreterCallItem = z16.object({
2441
+ type: z16.literal("code_interpreter_call"),
2442
+ id: z16.string(),
2443
+ code: z16.string().nullable(),
2444
+ container_id: z16.string(),
2445
+ outputs: z16.array(
2446
+ z16.discriminatedUnion("type", [
2447
+ z16.object({ type: z16.literal("logs"), logs: z16.string() }),
2448
+ z16.object({ type: z16.literal("image"), url: z16.string() })
2362
2449
  ])
2363
2450
  ).nullable()
2364
2451
  });
2365
- var imageGenerationCallItem = z15.object({
2366
- type: z15.literal("image_generation_call"),
2367
- id: z15.string(),
2368
- result: z15.string()
2452
+ var localShellCallItem = z16.object({
2453
+ type: z16.literal("local_shell_call"),
2454
+ id: z16.string(),
2455
+ call_id: z16.string(),
2456
+ action: z16.object({
2457
+ type: z16.literal("exec"),
2458
+ command: z16.array(z16.string()),
2459
+ timeout_ms: z16.number().optional(),
2460
+ user: z16.string().optional(),
2461
+ working_directory: z16.string().optional(),
2462
+ env: z16.record(z16.string(), z16.string()).optional()
2463
+ })
2464
+ });
2465
+ var sourceExecutionFileCodeInterpreterItem = z16.object({
2466
+ containerId: z16.string(),
2467
+ fileId: z16.string(),
2468
+ filename: z16.string()
2469
+ });
2470
+ var imageGenerationCallItem = z16.object({
2471
+ type: z16.literal("image_generation_call"),
2472
+ id: z16.string(),
2473
+ result: z16.string()
2369
2474
  });
2370
2475
  var TOP_LOGPROBS_MAX = 20;
2371
- var LOGPROBS_SCHEMA = z15.array(
2372
- z15.object({
2373
- token: z15.string(),
2374
- logprob: z15.number(),
2375
- top_logprobs: z15.array(
2376
- z15.object({
2377
- token: z15.string(),
2378
- logprob: z15.number()
2476
+ var LOGPROBS_SCHEMA = z16.array(
2477
+ z16.object({
2478
+ token: z16.string(),
2479
+ logprob: z16.number(),
2480
+ top_logprobs: z16.array(
2481
+ z16.object({
2482
+ token: z16.string(),
2483
+ logprob: z16.number()
2379
2484
  })
2380
2485
  )
2381
2486
  })
2382
2487
  );
2383
2488
  var OpenAIResponsesLanguageModel = class {
2384
2489
  constructor(modelId, config) {
2385
- this.specificationVersion = "v2";
2490
+ this.specificationVersion = "v3";
2386
2491
  this.supportedUrls = {
2387
2492
  "image/*": [/^https?:\/\/.*$/],
2388
2493
  "application/pdf": [/^https?:\/\/.*$/]
@@ -2441,7 +2546,8 @@ var OpenAIResponsesLanguageModel = class {
2441
2546
  prompt,
2442
2547
  systemMessageMode: modelConfig.systemMessageMode,
2443
2548
  fileIdPrefixes: this.config.fileIdPrefixes,
2444
- store: (_a = openaiOptions == null ? void 0 : openaiOptions.store) != null ? _a : true
2549
+ store: (_a = openaiOptions == null ? void 0 : openaiOptions.store) != null ? _a : true,
2550
+ hasLocalShellTool: hasOpenAITool("openai.local_shell")
2445
2551
  });
2446
2552
  warnings.push(...inputWarnings);
2447
2553
  const strictJsonSchema = (_b = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _b : false;
@@ -2586,7 +2692,7 @@ var OpenAIResponsesLanguageModel = class {
2586
2692
  };
2587
2693
  }
2588
2694
  async doGenerate(options) {
2589
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u;
2695
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
2590
2696
  const {
2591
2697
  args: body,
2592
2698
  warnings,
@@ -2596,6 +2702,7 @@ var OpenAIResponsesLanguageModel = class {
2596
2702
  path: "/responses",
2597
2703
  modelId: this.modelId
2598
2704
  });
2705
+ const providerKey = this.config.provider.replace(".responses", "");
2599
2706
  const {
2600
2707
  responseHeaders,
2601
2708
  value: response,
@@ -2606,45 +2713,50 @@ var OpenAIResponsesLanguageModel = class {
2606
2713
  body,
2607
2714
  failedResponseHandler: openaiFailedResponseHandler,
2608
2715
  successfulResponseHandler: createJsonResponseHandler5(
2609
- z15.object({
2610
- id: z15.string(),
2611
- created_at: z15.number(),
2612
- error: z15.object({
2613
- code: z15.string(),
2614
- message: z15.string()
2716
+ z16.object({
2717
+ id: z16.string(),
2718
+ created_at: z16.number(),
2719
+ error: z16.object({
2720
+ code: z16.string(),
2721
+ message: z16.string()
2615
2722
  }).nullish(),
2616
- model: z15.string(),
2617
- output: z15.array(
2618
- z15.discriminatedUnion("type", [
2619
- z15.object({
2620
- type: z15.literal("message"),
2621
- role: z15.literal("assistant"),
2622
- id: z15.string(),
2623
- content: z15.array(
2624
- z15.object({
2625
- type: z15.literal("output_text"),
2626
- text: z15.string(),
2723
+ model: z16.string(),
2724
+ output: z16.array(
2725
+ z16.discriminatedUnion("type", [
2726
+ z16.object({
2727
+ type: z16.literal("message"),
2728
+ role: z16.literal("assistant"),
2729
+ id: z16.string(),
2730
+ content: z16.array(
2731
+ z16.object({
2732
+ type: z16.literal("output_text"),
2733
+ text: z16.string(),
2627
2734
  logprobs: LOGPROBS_SCHEMA.nullish(),
2628
- annotations: z15.array(
2629
- z15.discriminatedUnion("type", [
2630
- z15.object({
2631
- type: z15.literal("url_citation"),
2632
- start_index: z15.number(),
2633
- end_index: z15.number(),
2634
- url: z15.string(),
2635
- title: z15.string()
2735
+ annotations: z16.array(
2736
+ z16.discriminatedUnion("type", [
2737
+ z16.object({
2738
+ type: z16.literal("url_citation"),
2739
+ start_index: z16.number(),
2740
+ end_index: z16.number(),
2741
+ url: z16.string(),
2742
+ title: z16.string()
2636
2743
  }),
2637
- z15.object({
2638
- type: z15.literal("file_citation"),
2639
- file_id: z15.string(),
2640
- filename: z15.string().nullish(),
2641
- index: z15.number().nullish(),
2642
- start_index: z15.number().nullish(),
2643
- end_index: z15.number().nullish(),
2644
- quote: z15.string().nullish()
2744
+ z16.object({
2745
+ type: z16.literal("file_citation"),
2746
+ file_id: z16.string(),
2747
+ filename: z16.string().nullish(),
2748
+ index: z16.number().nullish(),
2749
+ start_index: z16.number().nullish(),
2750
+ end_index: z16.number().nullish(),
2751
+ quote: z16.string().nullish()
2645
2752
  }),
2646
- z15.object({
2647
- type: z15.literal("container_file_citation")
2753
+ z16.object({
2754
+ type: z16.literal("container_file_citation"),
2755
+ container_id: z16.string(),
2756
+ end_index: z16.number(),
2757
+ file_id: z16.string(),
2758
+ filename: z16.string(),
2759
+ start_index: z16.number()
2648
2760
  })
2649
2761
  ])
2650
2762
  )
@@ -2655,33 +2767,34 @@ var OpenAIResponsesLanguageModel = class {
2655
2767
  fileSearchCallItem,
2656
2768
  codeInterpreterCallItem,
2657
2769
  imageGenerationCallItem,
2658
- z15.object({
2659
- type: z15.literal("function_call"),
2660
- call_id: z15.string(),
2661
- name: z15.string(),
2662
- arguments: z15.string(),
2663
- id: z15.string()
2770
+ localShellCallItem,
2771
+ z16.object({
2772
+ type: z16.literal("function_call"),
2773
+ call_id: z16.string(),
2774
+ name: z16.string(),
2775
+ arguments: z16.string(),
2776
+ id: z16.string()
2664
2777
  }),
2665
- z15.object({
2666
- type: z15.literal("computer_call"),
2667
- id: z15.string(),
2668
- status: z15.string().optional()
2778
+ z16.object({
2779
+ type: z16.literal("computer_call"),
2780
+ id: z16.string(),
2781
+ status: z16.string().optional()
2669
2782
  }),
2670
- z15.object({
2671
- type: z15.literal("reasoning"),
2672
- id: z15.string(),
2673
- encrypted_content: z15.string().nullish(),
2674
- summary: z15.array(
2675
- z15.object({
2676
- type: z15.literal("summary_text"),
2677
- text: z15.string()
2783
+ z16.object({
2784
+ type: z16.literal("reasoning"),
2785
+ id: z16.string(),
2786
+ encrypted_content: z16.string().nullish(),
2787
+ summary: z16.array(
2788
+ z16.object({
2789
+ type: z16.literal("summary_text"),
2790
+ text: z16.string()
2678
2791
  })
2679
2792
  )
2680
2793
  })
2681
2794
  ])
2682
2795
  ),
2683
- service_tier: z15.string().nullish(),
2684
- incomplete_details: z15.object({ reason: z15.string() }).nullable(),
2796
+ service_tier: z16.string().nullish(),
2797
+ incomplete_details: z16.object({ reason: z16.string() }).nullish(),
2685
2798
  usage: usageSchema2
2686
2799
  })
2687
2800
  ),
@@ -2741,6 +2854,20 @@ var OpenAIResponsesLanguageModel = class {
2741
2854
  });
2742
2855
  break;
2743
2856
  }
2857
+ case "local_shell_call": {
2858
+ content.push({
2859
+ type: "tool-call",
2860
+ toolCallId: part.call_id,
2861
+ toolName: "local_shell",
2862
+ input: JSON.stringify({ action: part.action }),
2863
+ providerMetadata: {
2864
+ openai: {
2865
+ itemId: part.id
2866
+ }
2867
+ }
2868
+ });
2869
+ break;
2870
+ }
2744
2871
  case "message": {
2745
2872
  for (const contentPart of part.content) {
2746
2873
  if (((_c = (_b = options.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.logprobs) && contentPart.logprobs) {
@@ -2751,7 +2878,8 @@ var OpenAIResponsesLanguageModel = class {
2751
2878
  text: contentPart.text,
2752
2879
  providerMetadata: {
2753
2880
  openai: {
2754
- itemId: part.id
2881
+ itemId: part.id,
2882
+ annotations: contentPart.annotations
2755
2883
  }
2756
2884
  }
2757
2885
  });
@@ -2776,6 +2904,19 @@ var OpenAIResponsesLanguageModel = class {
2776
2904
  startIndex: (_m = annotation.start_index) != null ? _m : void 0,
2777
2905
  endIndex: (_n = annotation.end_index) != null ? _n : void 0
2778
2906
  });
2907
+ } else if (annotation.type === "container_file_citation") {
2908
+ content.push({
2909
+ type: "source",
2910
+ sourceType: "executionFile",
2911
+ id: (_q = (_p = (_o = this.config).generateId) == null ? void 0 : _p.call(_o)) != null ? _q : generateId2(),
2912
+ providerMetadata: {
2913
+ [providerKey]: {
2914
+ containerId: annotation.container_id,
2915
+ fileId: annotation.file_id,
2916
+ filename: annotation.filename
2917
+ }
2918
+ }
2919
+ });
2779
2920
  }
2780
2921
  }
2781
2922
  }
@@ -2847,13 +2988,13 @@ var OpenAIResponsesLanguageModel = class {
2847
2988
  toolName: "file_search",
2848
2989
  result: {
2849
2990
  queries: part.queries,
2850
- results: (_p = (_o = part.results) == null ? void 0 : _o.map((result) => ({
2991
+ results: (_s = (_r = part.results) == null ? void 0 : _r.map((result) => ({
2851
2992
  attributes: result.attributes,
2852
2993
  fileId: result.file_id,
2853
2994
  filename: result.filename,
2854
2995
  score: result.score,
2855
2996
  text: result.text
2856
- }))) != null ? _p : null
2997
+ }))) != null ? _s : null
2857
2998
  },
2858
2999
  providerExecuted: true
2859
3000
  });
@@ -2895,15 +3036,15 @@ var OpenAIResponsesLanguageModel = class {
2895
3036
  return {
2896
3037
  content,
2897
3038
  finishReason: mapOpenAIResponseFinishReason({
2898
- finishReason: (_q = response.incomplete_details) == null ? void 0 : _q.reason,
3039
+ finishReason: (_t = response.incomplete_details) == null ? void 0 : _t.reason,
2899
3040
  hasFunctionCall
2900
3041
  }),
2901
3042
  usage: {
2902
3043
  inputTokens: response.usage.input_tokens,
2903
3044
  outputTokens: response.usage.output_tokens,
2904
3045
  totalTokens: response.usage.input_tokens + response.usage.output_tokens,
2905
- reasoningTokens: (_s = (_r = response.usage.output_tokens_details) == null ? void 0 : _r.reasoning_tokens) != null ? _s : void 0,
2906
- cachedInputTokens: (_u = (_t = response.usage.input_tokens_details) == null ? void 0 : _t.cached_tokens) != null ? _u : void 0
3046
+ reasoningTokens: (_v = (_u = response.usage.output_tokens_details) == null ? void 0 : _u.reasoning_tokens) != null ? _v : void 0,
3047
+ cachedInputTokens: (_x = (_w = response.usage.input_tokens_details) == null ? void 0 : _w.cached_tokens) != null ? _x : void 0
2907
3048
  },
2908
3049
  request: { body },
2909
3050
  response: {
@@ -2950,6 +3091,7 @@ var OpenAIResponsesLanguageModel = class {
2950
3091
  const logprobs = [];
2951
3092
  let responseId = null;
2952
3093
  const ongoingToolCalls = {};
3094
+ const ongoingAnnotations = [];
2953
3095
  let hasFunctionCall = false;
2954
3096
  const activeReasoning = {};
2955
3097
  let serviceTier;
@@ -2960,7 +3102,7 @@ var OpenAIResponsesLanguageModel = class {
2960
3102
  controller.enqueue({ type: "stream-start", warnings });
2961
3103
  },
2962
3104
  transform(chunk, controller) {
2963
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y;
3105
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B;
2964
3106
  if (options.includeRawChunks) {
2965
3107
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
2966
3108
  }
@@ -3001,6 +3143,24 @@ var OpenAIResponsesLanguageModel = class {
3001
3143
  id: value.item.id,
3002
3144
  toolName: "computer_use"
3003
3145
  });
3146
+ } else if (value.item.type === "code_interpreter_call") {
3147
+ ongoingToolCalls[value.output_index] = {
3148
+ toolName: "code_interpreter",
3149
+ toolCallId: value.item.id,
3150
+ codeInterpreter: {
3151
+ containerId: value.item.container_id
3152
+ }
3153
+ };
3154
+ controller.enqueue({
3155
+ type: "tool-input-start",
3156
+ id: value.item.id,
3157
+ toolName: "code_interpreter"
3158
+ });
3159
+ controller.enqueue({
3160
+ type: "tool-input-delta",
3161
+ id: value.item.id,
3162
+ delta: `{"containerId":"${value.item.container_id}","code":"`
3163
+ });
3004
3164
  } else if (value.item.type === "file_search_call") {
3005
3165
  controller.enqueue({
3006
3166
  type: "tool-call",
@@ -3018,6 +3178,7 @@ var OpenAIResponsesLanguageModel = class {
3018
3178
  providerExecuted: true
3019
3179
  });
3020
3180
  } else if (value.item.type === "message") {
3181
+ ongoingAnnotations.splice(0, ongoingAnnotations.length);
3021
3182
  controller.enqueue({
3022
3183
  type: "text-start",
3023
3184
  id: value.item.id,
@@ -3043,7 +3204,7 @@ var OpenAIResponsesLanguageModel = class {
3043
3204
  }
3044
3205
  });
3045
3206
  }
3046
- } else if (isResponseOutputItemDoneChunk(value)) {
3207
+ } else if (isResponseOutputItemDoneChunk(value) && value.item.type !== "message") {
3047
3208
  if (value.item.type === "function_call") {
3048
3209
  ongoingToolCalls[value.output_index] = void 0;
3049
3210
  hasFunctionCall = true;
@@ -3124,16 +3285,7 @@ var OpenAIResponsesLanguageModel = class {
3124
3285
  providerExecuted: true
3125
3286
  });
3126
3287
  } else if (value.item.type === "code_interpreter_call") {
3127
- controller.enqueue({
3128
- type: "tool-call",
3129
- toolCallId: value.item.id,
3130
- toolName: "code_interpreter",
3131
- input: JSON.stringify({
3132
- code: value.item.code,
3133
- containerId: value.item.container_id
3134
- }),
3135
- providerExecuted: true
3136
- });
3288
+ ongoingToolCalls[value.output_index] = void 0;
3137
3289
  controller.enqueue({
3138
3290
  type: "tool-result",
3139
3291
  toolCallId: value.item.id,
@@ -3153,10 +3305,25 @@ var OpenAIResponsesLanguageModel = class {
3153
3305
  },
3154
3306
  providerExecuted: true
3155
3307
  });
3156
- } else if (value.item.type === "message") {
3308
+ } else if (value.item.type === "local_shell_call") {
3309
+ ongoingToolCalls[value.output_index] = void 0;
3157
3310
  controller.enqueue({
3158
- type: "text-end",
3159
- id: value.item.id
3311
+ type: "tool-call",
3312
+ toolCallId: value.item.call_id,
3313
+ toolName: "local_shell",
3314
+ input: JSON.stringify({
3315
+ action: {
3316
+ type: "exec",
3317
+ command: value.item.action.command,
3318
+ timeoutMs: value.item.action.timeout_ms,
3319
+ user: value.item.action.user,
3320
+ workingDirectory: value.item.action.working_directory,
3321
+ env: value.item.action.env
3322
+ }
3323
+ }),
3324
+ providerMetadata: {
3325
+ openai: { itemId: value.item.id }
3326
+ }
3160
3327
  });
3161
3328
  } else if (isResponseOutputItemDoneReasoningChunk(value)) {
3162
3329
  const activeReasoningPart = activeReasoning[value.item.id];
@@ -3183,6 +3350,51 @@ var OpenAIResponsesLanguageModel = class {
3183
3350
  delta: value.delta
3184
3351
  });
3185
3352
  }
3353
+ } else if (isResponseImageGenerationCallPartialImageChunk(value)) {
3354
+ controller.enqueue({
3355
+ type: "tool-result",
3356
+ toolCallId: value.item_id,
3357
+ toolName: "image_generation",
3358
+ result: {
3359
+ result: value.partial_image_b64
3360
+ },
3361
+ providerExecuted: true,
3362
+ preliminary: true
3363
+ });
3364
+ } else if (isResponseCodeInterpreterCallCodeDeltaChunk(value)) {
3365
+ const toolCall = ongoingToolCalls[value.output_index];
3366
+ if (toolCall != null) {
3367
+ controller.enqueue({
3368
+ type: "tool-input-delta",
3369
+ id: toolCall.toolCallId,
3370
+ // The delta is code, which is embedding in a JSON string.
3371
+ // To escape it, we use JSON.stringify and slice to remove the outer quotes.
3372
+ delta: JSON.stringify(value.delta).slice(1, -1)
3373
+ });
3374
+ }
3375
+ } else if (isResponseCodeInterpreterCallCodeDoneChunk(value)) {
3376
+ const toolCall = ongoingToolCalls[value.output_index];
3377
+ if (toolCall != null) {
3378
+ controller.enqueue({
3379
+ type: "tool-input-delta",
3380
+ id: toolCall.toolCallId,
3381
+ delta: '"}'
3382
+ });
3383
+ controller.enqueue({
3384
+ type: "tool-input-end",
3385
+ id: toolCall.toolCallId
3386
+ });
3387
+ controller.enqueue({
3388
+ type: "tool-call",
3389
+ toolCallId: toolCall.toolCallId,
3390
+ toolName: "code_interpreter",
3391
+ input: JSON.stringify({
3392
+ code: value.code,
3393
+ containerId: toolCall.codeInterpreter.containerId
3394
+ }),
3395
+ providerExecuted: true
3396
+ });
3397
+ }
3186
3398
  } else if (isResponseCreatedChunk(value)) {
3187
3399
  responseId = value.response.id;
3188
3400
  controller.enqueue({
@@ -3241,6 +3453,7 @@ var OpenAIResponsesLanguageModel = class {
3241
3453
  serviceTier = value.response.service_tier;
3242
3454
  }
3243
3455
  } else if (isResponseAnnotationAddedChunk(value)) {
3456
+ ongoingAnnotations.push(value.annotation);
3244
3457
  if (value.annotation.type === "url_citation") {
3245
3458
  controller.enqueue({
3246
3459
  type: "source",
@@ -3261,7 +3474,31 @@ var OpenAIResponsesLanguageModel = class {
3261
3474
  startIndex: (_x = value.annotation.start_index) != null ? _x : void 0,
3262
3475
  endIndex: (_y = value.annotation.end_index) != null ? _y : void 0
3263
3476
  });
3477
+ } else if (value.annotation.type === "container_file_citation") {
3478
+ controller.enqueue({
3479
+ type: "source",
3480
+ sourceType: "executionFile",
3481
+ id: (_B = (_A = (_z = self.config).generateId) == null ? void 0 : _A.call(_z)) != null ? _B : generateId2(),
3482
+ providerMetadata: {
3483
+ openai: {
3484
+ containerId: value.annotation.container_id,
3485
+ fileId: value.annotation.file_id,
3486
+ filename: value.annotation.filename
3487
+ }
3488
+ }
3489
+ });
3264
3490
  }
3491
+ } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "message") {
3492
+ controller.enqueue({
3493
+ type: "text-end",
3494
+ id: value.item.id,
3495
+ providerMetadata: {
3496
+ openai: {
3497
+ itemId: value.item.id,
3498
+ annotations: ongoingAnnotations
3499
+ }
3500
+ }
3501
+ });
3265
3502
  } else if (isErrorChunk(value)) {
3266
3503
  controller.enqueue({ type: "error", error: value });
3267
3504
  }
@@ -3292,166 +3529,209 @@ var OpenAIResponsesLanguageModel = class {
3292
3529
  };
3293
3530
  }
3294
3531
  };
3295
- var usageSchema2 = z15.object({
3296
- input_tokens: z15.number(),
3297
- input_tokens_details: z15.object({ cached_tokens: z15.number().nullish() }).nullish(),
3298
- output_tokens: z15.number(),
3299
- output_tokens_details: z15.object({ reasoning_tokens: z15.number().nullish() }).nullish()
3532
+ var usageSchema2 = z16.object({
3533
+ input_tokens: z16.number(),
3534
+ input_tokens_details: z16.object({ cached_tokens: z16.number().nullish() }).nullish(),
3535
+ output_tokens: z16.number(),
3536
+ output_tokens_details: z16.object({ reasoning_tokens: z16.number().nullish() }).nullish()
3300
3537
  });
3301
- var textDeltaChunkSchema = z15.object({
3302
- type: z15.literal("response.output_text.delta"),
3303
- item_id: z15.string(),
3304
- delta: z15.string(),
3538
+ var textDeltaChunkSchema = z16.object({
3539
+ type: z16.literal("response.output_text.delta"),
3540
+ item_id: z16.string(),
3541
+ delta: z16.string(),
3305
3542
  logprobs: LOGPROBS_SCHEMA.nullish()
3306
3543
  });
3307
- var errorChunkSchema = z15.object({
3308
- type: z15.literal("error"),
3309
- code: z15.string(),
3310
- message: z15.string(),
3311
- param: z15.string().nullish(),
3312
- sequence_number: z15.number()
3544
+ var errorChunkSchema = z16.object({
3545
+ type: z16.literal("error"),
3546
+ code: z16.string(),
3547
+ message: z16.string(),
3548
+ param: z16.string().nullish(),
3549
+ sequence_number: z16.number()
3313
3550
  });
3314
- var responseFinishedChunkSchema = z15.object({
3315
- type: z15.enum(["response.completed", "response.incomplete"]),
3316
- response: z15.object({
3317
- incomplete_details: z15.object({ reason: z15.string() }).nullish(),
3551
+ var responseFinishedChunkSchema = z16.object({
3552
+ type: z16.enum(["response.completed", "response.incomplete"]),
3553
+ response: z16.object({
3554
+ incomplete_details: z16.object({ reason: z16.string() }).nullish(),
3318
3555
  usage: usageSchema2,
3319
- service_tier: z15.string().nullish()
3556
+ service_tier: z16.string().nullish()
3320
3557
  })
3321
3558
  });
3322
- var responseCreatedChunkSchema = z15.object({
3323
- type: z15.literal("response.created"),
3324
- response: z15.object({
3325
- id: z15.string(),
3326
- created_at: z15.number(),
3327
- model: z15.string(),
3328
- service_tier: z15.string().nullish()
3559
+ var responseCreatedChunkSchema = z16.object({
3560
+ type: z16.literal("response.created"),
3561
+ response: z16.object({
3562
+ id: z16.string(),
3563
+ created_at: z16.number(),
3564
+ model: z16.string(),
3565
+ service_tier: z16.string().nullish()
3329
3566
  })
3330
3567
  });
3331
- var responseOutputItemAddedSchema = z15.object({
3332
- type: z15.literal("response.output_item.added"),
3333
- output_index: z15.number(),
3334
- item: z15.discriminatedUnion("type", [
3335
- z15.object({
3336
- type: z15.literal("message"),
3337
- id: z15.string()
3568
+ var responseOutputItemAddedSchema = z16.object({
3569
+ type: z16.literal("response.output_item.added"),
3570
+ output_index: z16.number(),
3571
+ item: z16.discriminatedUnion("type", [
3572
+ z16.object({
3573
+ type: z16.literal("message"),
3574
+ id: z16.string()
3338
3575
  }),
3339
- z15.object({
3340
- type: z15.literal("reasoning"),
3341
- id: z15.string(),
3342
- encrypted_content: z15.string().nullish()
3576
+ z16.object({
3577
+ type: z16.literal("reasoning"),
3578
+ id: z16.string(),
3579
+ encrypted_content: z16.string().nullish()
3343
3580
  }),
3344
- z15.object({
3345
- type: z15.literal("function_call"),
3346
- id: z15.string(),
3347
- call_id: z15.string(),
3348
- name: z15.string(),
3349
- arguments: z15.string()
3581
+ z16.object({
3582
+ type: z16.literal("function_call"),
3583
+ id: z16.string(),
3584
+ call_id: z16.string(),
3585
+ name: z16.string(),
3586
+ arguments: z16.string()
3350
3587
  }),
3351
- z15.object({
3352
- type: z15.literal("web_search_call"),
3353
- id: z15.string(),
3354
- status: z15.string(),
3355
- action: z15.object({
3356
- type: z15.literal("search"),
3357
- query: z15.string().optional()
3588
+ z16.object({
3589
+ type: z16.literal("web_search_call"),
3590
+ id: z16.string(),
3591
+ status: z16.string(),
3592
+ action: z16.object({
3593
+ type: z16.literal("search"),
3594
+ query: z16.string().optional()
3358
3595
  }).nullish()
3359
3596
  }),
3360
- z15.object({
3361
- type: z15.literal("computer_call"),
3362
- id: z15.string(),
3363
- status: z15.string()
3597
+ z16.object({
3598
+ type: z16.literal("computer_call"),
3599
+ id: z16.string(),
3600
+ status: z16.string()
3364
3601
  }),
3365
- z15.object({
3366
- type: z15.literal("file_search_call"),
3367
- id: z15.string()
3602
+ z16.object({
3603
+ type: z16.literal("file_search_call"),
3604
+ id: z16.string()
3368
3605
  }),
3369
- z15.object({
3370
- type: z15.literal("image_generation_call"),
3371
- id: z15.string()
3606
+ z16.object({
3607
+ type: z16.literal("image_generation_call"),
3608
+ id: z16.string()
3609
+ }),
3610
+ z16.object({
3611
+ type: z16.literal("code_interpreter_call"),
3612
+ id: z16.string(),
3613
+ container_id: z16.string(),
3614
+ code: z16.string().nullable(),
3615
+ outputs: z16.array(
3616
+ z16.discriminatedUnion("type", [
3617
+ z16.object({ type: z16.literal("logs"), logs: z16.string() }),
3618
+ z16.object({ type: z16.literal("image"), url: z16.string() })
3619
+ ])
3620
+ ).nullable(),
3621
+ status: z16.string()
3372
3622
  })
3373
3623
  ])
3374
3624
  });
3375
- var responseOutputItemDoneSchema = z15.object({
3376
- type: z15.literal("response.output_item.done"),
3377
- output_index: z15.number(),
3378
- item: z15.discriminatedUnion("type", [
3379
- z15.object({
3380
- type: z15.literal("message"),
3381
- id: z15.string()
3625
+ var responseOutputItemDoneSchema = z16.object({
3626
+ type: z16.literal("response.output_item.done"),
3627
+ output_index: z16.number(),
3628
+ item: z16.discriminatedUnion("type", [
3629
+ z16.object({
3630
+ type: z16.literal("message"),
3631
+ id: z16.string()
3382
3632
  }),
3383
- z15.object({
3384
- type: z15.literal("reasoning"),
3385
- id: z15.string(),
3386
- encrypted_content: z15.string().nullish()
3633
+ z16.object({
3634
+ type: z16.literal("reasoning"),
3635
+ id: z16.string(),
3636
+ encrypted_content: z16.string().nullish()
3387
3637
  }),
3388
- z15.object({
3389
- type: z15.literal("function_call"),
3390
- id: z15.string(),
3391
- call_id: z15.string(),
3392
- name: z15.string(),
3393
- arguments: z15.string(),
3394
- status: z15.literal("completed")
3638
+ z16.object({
3639
+ type: z16.literal("function_call"),
3640
+ id: z16.string(),
3641
+ call_id: z16.string(),
3642
+ name: z16.string(),
3643
+ arguments: z16.string(),
3644
+ status: z16.literal("completed")
3395
3645
  }),
3396
3646
  codeInterpreterCallItem,
3397
3647
  imageGenerationCallItem,
3398
3648
  webSearchCallItem,
3399
3649
  fileSearchCallItem,
3400
- z15.object({
3401
- type: z15.literal("computer_call"),
3402
- id: z15.string(),
3403
- status: z15.literal("completed")
3650
+ localShellCallItem,
3651
+ z16.object({
3652
+ type: z16.literal("computer_call"),
3653
+ id: z16.string(),
3654
+ status: z16.literal("completed")
3404
3655
  })
3405
3656
  ])
3406
3657
  });
3407
- var responseFunctionCallArgumentsDeltaSchema = z15.object({
3408
- type: z15.literal("response.function_call_arguments.delta"),
3409
- item_id: z15.string(),
3410
- output_index: z15.number(),
3411
- delta: z15.string()
3658
+ var responseFunctionCallArgumentsDeltaSchema = z16.object({
3659
+ type: z16.literal("response.function_call_arguments.delta"),
3660
+ item_id: z16.string(),
3661
+ output_index: z16.number(),
3662
+ delta: z16.string()
3663
+ });
3664
+ var responseImageGenerationCallPartialImageSchema = z16.object({
3665
+ type: z16.literal("response.image_generation_call.partial_image"),
3666
+ item_id: z16.string(),
3667
+ output_index: z16.number(),
3668
+ partial_image_b64: z16.string()
3669
+ });
3670
+ var responseCodeInterpreterCallCodeDeltaSchema = z16.object({
3671
+ type: z16.literal("response.code_interpreter_call_code.delta"),
3672
+ item_id: z16.string(),
3673
+ output_index: z16.number(),
3674
+ delta: z16.string()
3412
3675
  });
3413
- var responseAnnotationAddedSchema = z15.object({
3414
- type: z15.literal("response.output_text.annotation.added"),
3415
- annotation: z15.discriminatedUnion("type", [
3416
- z15.object({
3417
- type: z15.literal("url_citation"),
3418
- url: z15.string(),
3419
- title: z15.string()
3676
+ var responseCodeInterpreterCallCodeDoneSchema = z16.object({
3677
+ type: z16.literal("response.code_interpreter_call_code.done"),
3678
+ item_id: z16.string(),
3679
+ output_index: z16.number(),
3680
+ code: z16.string()
3681
+ });
3682
+ var responseAnnotationAddedSchema = z16.object({
3683
+ type: z16.literal("response.output_text.annotation.added"),
3684
+ annotation: z16.discriminatedUnion("type", [
3685
+ z16.object({
3686
+ type: z16.literal("url_citation"),
3687
+ url: z16.string(),
3688
+ title: z16.string()
3689
+ }),
3690
+ z16.object({
3691
+ type: z16.literal("file_citation"),
3692
+ file_id: z16.string(),
3693
+ filename: z16.string().nullish(),
3694
+ index: z16.number().nullish(),
3695
+ start_index: z16.number().nullish(),
3696
+ end_index: z16.number().nullish(),
3697
+ quote: z16.string().nullish()
3420
3698
  }),
3421
- z15.object({
3422
- type: z15.literal("file_citation"),
3423
- file_id: z15.string(),
3424
- filename: z15.string().nullish(),
3425
- index: z15.number().nullish(),
3426
- start_index: z15.number().nullish(),
3427
- end_index: z15.number().nullish(),
3428
- quote: z15.string().nullish()
3699
+ z16.object({
3700
+ type: z16.literal("container_file_citation"),
3701
+ container_id: z16.string(),
3702
+ end_index: z16.number(),
3703
+ file_id: z16.string(),
3704
+ filename: z16.string(),
3705
+ start_index: z16.number()
3429
3706
  })
3430
3707
  ])
3431
3708
  });
3432
- var responseReasoningSummaryPartAddedSchema = z15.object({
3433
- type: z15.literal("response.reasoning_summary_part.added"),
3434
- item_id: z15.string(),
3435
- summary_index: z15.number()
3709
+ var responseReasoningSummaryPartAddedSchema = z16.object({
3710
+ type: z16.literal("response.reasoning_summary_part.added"),
3711
+ item_id: z16.string(),
3712
+ summary_index: z16.number()
3436
3713
  });
3437
- var responseReasoningSummaryTextDeltaSchema = z15.object({
3438
- type: z15.literal("response.reasoning_summary_text.delta"),
3439
- item_id: z15.string(),
3440
- summary_index: z15.number(),
3441
- delta: z15.string()
3714
+ var responseReasoningSummaryTextDeltaSchema = z16.object({
3715
+ type: z16.literal("response.reasoning_summary_text.delta"),
3716
+ item_id: z16.string(),
3717
+ summary_index: z16.number(),
3718
+ delta: z16.string()
3442
3719
  });
3443
- var openaiResponsesChunkSchema = z15.union([
3720
+ var openaiResponsesChunkSchema = z16.union([
3444
3721
  textDeltaChunkSchema,
3445
3722
  responseFinishedChunkSchema,
3446
3723
  responseCreatedChunkSchema,
3447
3724
  responseOutputItemAddedSchema,
3448
3725
  responseOutputItemDoneSchema,
3449
3726
  responseFunctionCallArgumentsDeltaSchema,
3727
+ responseImageGenerationCallPartialImageSchema,
3728
+ responseCodeInterpreterCallCodeDeltaSchema,
3729
+ responseCodeInterpreterCallCodeDoneSchema,
3450
3730
  responseAnnotationAddedSchema,
3451
3731
  responseReasoningSummaryPartAddedSchema,
3452
3732
  responseReasoningSummaryTextDeltaSchema,
3453
3733
  errorChunkSchema,
3454
- z15.object({ type: z15.string() }).loose()
3734
+ z16.object({ type: z16.string() }).loose()
3455
3735
  // fallback for unknown chunks
3456
3736
  ]);
3457
3737
  function isTextDeltaChunk(chunk) {
@@ -3472,6 +3752,15 @@ function isResponseCreatedChunk(chunk) {
3472
3752
  function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
3473
3753
  return chunk.type === "response.function_call_arguments.delta";
3474
3754
  }
3755
+ function isResponseImageGenerationCallPartialImageChunk(chunk) {
3756
+ return chunk.type === "response.image_generation_call.partial_image";
3757
+ }
3758
+ function isResponseCodeInterpreterCallCodeDeltaChunk(chunk) {
3759
+ return chunk.type === "response.code_interpreter_call_code.delta";
3760
+ }
3761
+ function isResponseCodeInterpreterCallCodeDoneChunk(chunk) {
3762
+ return chunk.type === "response.code_interpreter_call_code.done";
3763
+ }
3475
3764
  function isResponseOutputItemAddedChunk(chunk) {
3476
3765
  return chunk.type === "response.output_item.added";
3477
3766
  }
@@ -3524,15 +3813,15 @@ function getResponsesModelConfig(modelId) {
3524
3813
  isReasoningModel: false
3525
3814
  };
3526
3815
  }
3527
- var openaiResponsesProviderOptionsSchema = z15.object({
3528
- include: z15.array(
3529
- z15.enum([
3816
+ var openaiResponsesProviderOptionsSchema = z16.object({
3817
+ include: z16.array(
3818
+ z16.enum([
3530
3819
  "reasoning.encrypted_content",
3531
3820
  "file_search_call.results",
3532
3821
  "message.output_text.logprobs"
3533
3822
  ])
3534
3823
  ).nullish(),
3535
- instructions: z15.string().nullish(),
3824
+ instructions: z16.string().nullish(),
3536
3825
  /**
3537
3826
  * Return the log probabilities of the tokens.
3538
3827
  *
@@ -3545,25 +3834,34 @@ var openaiResponsesProviderOptionsSchema = z15.object({
3545
3834
  * @see https://platform.openai.com/docs/api-reference/responses/create
3546
3835
  * @see https://cookbook.openai.com/examples/using_logprobs
3547
3836
  */
3548
- logprobs: z15.union([z15.boolean(), z15.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
3837
+ logprobs: z16.union([z16.boolean(), z16.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
3549
3838
  /**
3550
3839
  * The maximum number of total calls to built-in tools that can be processed in a response.
3551
3840
  * This maximum number applies across all built-in tool calls, not per individual tool.
3552
3841
  * Any further attempts to call a tool by the model will be ignored.
3553
3842
  */
3554
- maxToolCalls: z15.number().nullish(),
3555
- metadata: z15.any().nullish(),
3556
- parallelToolCalls: z15.boolean().nullish(),
3557
- previousResponseId: z15.string().nullish(),
3558
- promptCacheKey: z15.string().nullish(),
3559
- reasoningEffort: z15.string().nullish(),
3560
- reasoningSummary: z15.string().nullish(),
3561
- safetyIdentifier: z15.string().nullish(),
3562
- serviceTier: z15.enum(["auto", "flex", "priority"]).nullish(),
3563
- store: z15.boolean().nullish(),
3564
- strictJsonSchema: z15.boolean().nullish(),
3565
- textVerbosity: z15.enum(["low", "medium", "high"]).nullish(),
3566
- user: z15.string().nullish()
3843
+ maxToolCalls: z16.number().nullish(),
3844
+ metadata: z16.any().nullish(),
3845
+ parallelToolCalls: z16.boolean().nullish(),
3846
+ previousResponseId: z16.string().nullish(),
3847
+ promptCacheKey: z16.string().nullish(),
3848
+ reasoningEffort: z16.string().nullish(),
3849
+ reasoningSummary: z16.string().nullish(),
3850
+ safetyIdentifier: z16.string().nullish(),
3851
+ serviceTier: z16.enum(["auto", "flex", "priority"]).nullish(),
3852
+ store: z16.boolean().nullish(),
3853
+ strictJsonSchema: z16.boolean().nullish(),
3854
+ textVerbosity: z16.enum(["low", "medium", "high"]).nullish(),
3855
+ user: z16.string().nullish()
3856
+ });
3857
+ var openaiResponsesTextUIPartProviderMetadataSchema = z16.object({
3858
+ openai: z16.object({
3859
+ itemId: z16.string(),
3860
+ annotations: z16.array(responseAnnotationAddedSchema.shape.annotation)
3861
+ })
3862
+ });
3863
+ var openaiSourceExecutionFileProviderMetadataSchema = z16.object({
3864
+ openai: sourceExecutionFileCodeInterpreterItem
3567
3865
  });
3568
3866
 
3569
3867
  // src/speech/openai-speech-model.ts
@@ -3572,11 +3870,11 @@ import {
3572
3870
  createBinaryResponseHandler,
3573
3871
  parseProviderOptions as parseProviderOptions6,
3574
3872
  postJsonToApi as postJsonToApi6
3575
- } from "@ai-sdk/provider-utils";
3576
- import { z as z16 } from "zod/v4";
3577
- var OpenAIProviderOptionsSchema = z16.object({
3578
- instructions: z16.string().nullish(),
3579
- speed: z16.number().min(0.25).max(4).default(1).nullish()
3873
+ } from "@zenning/provider-utils";
3874
+ import { z as z17 } from "zod/v4";
3875
+ var OpenAIProviderOptionsSchema = z17.object({
3876
+ instructions: z17.string().nullish(),
3877
+ speed: z17.number().min(0.25).max(4).default(1).nullish()
3580
3878
  });
3581
3879
  var OpenAISpeechModel = class {
3582
3880
  constructor(modelId, config) {
@@ -3686,34 +3984,34 @@ import {
3686
3984
  mediaTypeToExtension,
3687
3985
  parseProviderOptions as parseProviderOptions7,
3688
3986
  postFormDataToApi
3689
- } from "@ai-sdk/provider-utils";
3690
- import { z as z18 } from "zod/v4";
3987
+ } from "@zenning/provider-utils";
3988
+ import { z as z19 } from "zod/v4";
3691
3989
 
3692
3990
  // src/transcription/openai-transcription-options.ts
3693
- import { z as z17 } from "zod/v4";
3694
- var openAITranscriptionProviderOptions = z17.object({
3991
+ import { z as z18 } from "zod/v4";
3992
+ var openAITranscriptionProviderOptions = z18.object({
3695
3993
  /**
3696
3994
  * Additional information to include in the transcription response.
3697
3995
  */
3698
- include: z17.array(z17.string()).optional(),
3996
+ include: z18.array(z18.string()).optional(),
3699
3997
  /**
3700
3998
  * The language of the input audio in ISO-639-1 format.
3701
3999
  */
3702
- language: z17.string().optional(),
4000
+ language: z18.string().optional(),
3703
4001
  /**
3704
4002
  * An optional text to guide the model's style or continue a previous audio segment.
3705
4003
  */
3706
- prompt: z17.string().optional(),
4004
+ prompt: z18.string().optional(),
3707
4005
  /**
3708
4006
  * The sampling temperature, between 0 and 1.
3709
4007
  * @default 0
3710
4008
  */
3711
- temperature: z17.number().min(0).max(1).default(0).optional(),
4009
+ temperature: z18.number().min(0).max(1).default(0).optional(),
3712
4010
  /**
3713
4011
  * The timestamp granularities to populate for this transcription.
3714
4012
  * @default ['segment']
3715
4013
  */
3716
- timestampGranularities: z17.array(z17.enum(["word", "segment"])).default(["segment"]).optional()
4014
+ timestampGranularities: z18.array(z18.enum(["word", "segment"])).default(["segment"]).optional()
3717
4015
  });
3718
4016
 
3719
4017
  // src/transcription/openai-transcription-model.ts
@@ -3882,48 +4180,59 @@ var OpenAITranscriptionModel = class {
3882
4180
  };
3883
4181
  }
3884
4182
  };
3885
// Shape of the JSON body returned by OpenAI's transcription endpoint
// (verbose_json format). `words` and `segments` are only present when the
// corresponding timestamp granularity was requested, hence `.nullish()`.
var openaiTranscriptionResponseSchema = z19.object({
  text: z19.string(),
  language: z19.string().nullish(),
  duration: z19.number().nullish(),
  // Word-level timestamps (seconds), present for granularity 'word'.
  words: z19.array(
    z19.object({
      word: z19.string(),
      start: z19.number(),
      end: z19.number()
    })
  ).nullish(),
  // Segment-level metadata, present for granularity 'segment'.
  segments: z19.array(
    z19.object({
      id: z19.number(),
      seek: z19.number(),
      start: z19.number(),
      end: z19.number(),
      text: z19.string(),
      tokens: z19.array(z19.number()),
      temperature: z19.number(),
      avg_logprob: z19.number(),
      compression_ratio: z19.number(),
      no_speech_prob: z19.number()
    })
  ).nullish()
});
3911
4209
 
4210
// src/version.ts
// Bundler-injected build flag: the published package version is selected at
// build time; the "0.0.0-test" alternative is only taken in test builds.
var VERSION = false ? "0.0.0-test" : "2.3.0";
4212
+
3912
4213
  // src/openai-provider.ts
3913
4214
  function createOpenAI(options = {}) {
3914
4215
  var _a, _b;
3915
- const baseURL = (_a = withoutTrailingSlash(options.baseURL)) != null ? _a : "https://api.openai.com/v1";
4216
+ const baseURL = (_a = withoutTrailingSlash(
4217
+ loadOptionalSetting({
4218
+ settingValue: options.baseURL,
4219
+ environmentVariableName: "OPENAI_BASE_URL"
4220
+ })
4221
+ )) != null ? _a : "https://api.openai.com/v1";
3916
4222
  const providerName = (_b = options.name) != null ? _b : "openai";
3917
- const getHeaders = () => ({
3918
- Authorization: `Bearer ${loadApiKey({
3919
- apiKey: options.apiKey,
3920
- environmentVariableName: "OPENAI_API_KEY",
3921
- description: "OpenAI"
3922
- })}`,
3923
- "OpenAI-Organization": options.organization,
3924
- "OpenAI-Project": options.project,
3925
- ...options.headers
3926
- });
4223
+ const getHeaders = () => withUserAgentSuffix(
4224
+ {
4225
+ Authorization: `Bearer ${loadApiKey({
4226
+ apiKey: options.apiKey,
4227
+ environmentVariableName: "OPENAI_API_KEY",
4228
+ description: "OpenAI"
4229
+ })}`,
4230
+ "OpenAI-Organization": options.organization,
4231
+ "OpenAI-Project": options.project,
4232
+ ...options.headers
4233
+ },
4234
+ `ai-sdk/openai/${VERSION}`
4235
+ );
3927
4236
  const createChatModel = (modelId) => new OpenAIChatLanguageModel(modelId, {
3928
4237
  provider: `${providerName}.chat`,
3929
4238
  url: ({ path }) => `${baseURL}${path}`,
@@ -3998,6 +4307,7 @@ function createOpenAI(options = {}) {
3998
4307
  }
3999
4308
  var openai = createOpenAI();
4000
4309
  export {
4310
+ VERSION,
4001
4311
  createOpenAI,
4002
4312
  openai
4003
4313
  };