@ai-sdk/openai 1.2.0 → 1.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -26,7 +26,7 @@ __export(src_exports, {
  module.exports = __toCommonJS(src_exports);
 
  // src/openai-provider.ts
- var import_provider_utils7 = require("@ai-sdk/provider-utils");
+ var import_provider_utils9 = require("@ai-sdk/provider-utils");
 
  // src/openai-chat-language-model.ts
  var import_provider3 = require("@ai-sdk/provider");
@@ -42,6 +42,7 @@ function convertToOpenAIChatMessages({
  systemMessageMode = "system"
  }) {
  const messages = [];
+ const warnings = [];
  for (const { role, content } of prompt) {
  switch (role) {
  case "system": {
@@ -55,6 +56,10 @@ function convertToOpenAIChatMessages({
  break;
  }
  case "remove": {
+ warnings.push({
+ type: "other",
+ message: "system messages are removed for this model"
+ });
  break;
  }
  default: {
@@ -195,7 +200,7 @@ function convertToOpenAIChatMessages({
  }
  }
  }
- return messages;
+ return { messages, warnings };
  }
 
  // src/map-openai-chat-logprobs.ts
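Note: convertToOpenAIChatMessages now returns warnings alongside the converted messages, instead of leaving system-message detection to the caller. A minimal sketch of the new contract, assuming a prompt whose system messages must be stripped:

const { messages, warnings } = convertToOpenAIChatMessages({
  prompt,
  useLegacyFunctionCalling: false,
  systemMessageMode: "remove",
});
// messages: the prompt in OpenAI chat format, minus system messages
// warnings: [{ type: "other", message: "system messages are removed for this model" }]
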
@@ -316,12 +321,12 @@ function prepareTools({
  };
  }
  }
- const openaiTools = [];
+ const openaiTools2 = [];
  for (const tool of tools) {
  if (tool.type === "provider-defined") {
  toolWarnings.push({ type: "unsupported-tool", tool });
  } else {
- openaiTools.push({
+ openaiTools2.push({
  type: "function",
  function: {
  name: tool.name,
@@ -333,17 +338,17 @@ function prepareTools({
  }
  }
  if (toolChoice == null) {
- return { tools: openaiTools, tool_choice: void 0, toolWarnings };
+ return { tools: openaiTools2, tool_choice: void 0, toolWarnings };
  }
  const type = toolChoice.type;
  switch (type) {
  case "auto":
  case "none":
  case "required":
- return { tools: openaiTools, tool_choice: type, toolWarnings };
+ return { tools: openaiTools2, tool_choice: type, toolWarnings };
  case "tool":
  return {
- tools: openaiTools,
+ tools: openaiTools2,
  tool_choice: {
  type: "function",
  function: {
@@ -426,12 +431,14 @@ var OpenAIChatLanguageModel = class {
  functionality: "structuredOutputs with useLegacyFunctionCalling"
  });
  }
- if (getSystemMessageMode(this.modelId) === "remove" && prompt.some((message) => message.role === "system")) {
- warnings.push({
- type: "other",
- message: "system messages are removed for this model"
- });
- }
+ const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
+ {
+ prompt,
+ useLegacyFunctionCalling,
+ systemMessageMode: getSystemMessageMode(this.modelId)
+ }
+ );
+ warnings.push(...messageWarnings);
  const baseArgs = {
  // model id:
  model: this.modelId,
@@ -466,11 +473,7 @@ var OpenAIChatLanguageModel = class {
  prediction: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.prediction,
  reasoning_effort: (_g = (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
  // messages:
- messages: convertToOpenAIChatMessages({
- prompt,
- useLegacyFunctionCalling,
- systemMessageMode: getSystemMessageMode(this.modelId)
- })
+ messages
  };
  if (isReasoningModel(this.modelId)) {
  if (baseArgs.temperature != null) {
@@ -612,7 +615,11 @@ var OpenAIChatLanguageModel = class {
  async doGenerate(options) {
  var _a, _b, _c, _d, _e, _f, _g, _h;
  const { args: body, warnings } = this.getArgs(options);
- const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
+ const {
+ responseHeaders,
+ value: response,
+ rawValue: rawResponse
+ } = await (0, import_provider_utils3.postJsonToApi)({
  url: this.config.url({
  path: "/chat/completions",
  modelId: this.modelId
@@ -667,7 +674,7 @@ var OpenAIChatLanguageModel = class {
  completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
  },
  rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
+ rawResponse: { headers: responseHeaders, body: rawResponse },
  request: { body: JSON.stringify(body) },
  response: getResponseMetadata(response),
  warnings,
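Note: doGenerate now forwards the raw, unparsed response body (the rawValue returned by postJsonToApi) on rawResponse; the completion model below receives the same change. A hedged sketch of what a caller invoking the provider model directly can now observe (chatModel and callOptions are hypothetical placeholders):

const { rawResponse } = await chatModel.doGenerate(callOptions);
console.log(rawResponse.headers); // HTTP response headers (as before)
console.log(rawResponse.body);    // new: the raw JSON body of the API response
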
@@ -1269,7 +1276,11 @@ var OpenAICompletionLanguageModel = class {
  }
  async doGenerate(options) {
  const { args, warnings } = this.getArgs(options);
- const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
+ const {
+ responseHeaders,
+ value: response,
+ rawValue: rawResponse
+ } = await (0, import_provider_utils4.postJsonToApi)({
  url: this.config.url({
  path: "/completions",
  modelId: this.modelId
@@ -1294,7 +1305,7 @@ var OpenAICompletionLanguageModel = class {
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
  rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
+ rawResponse: { headers: responseHeaders, body: rawResponse },
  response: getResponseMetadata(response),
  warnings,
  request: { body: JSON.stringify(args) }
@@ -1589,14 +1600,786 @@ var openaiImageResponseSchema = import_zod5.z.object({
  data: import_zod5.z.array(import_zod5.z.object({ b64_json: import_zod5.z.string() }))
  });
 
+ // src/responses/openai-responses-language-model.ts
+ var import_provider_utils8 = require("@ai-sdk/provider-utils");
+ var import_zod6 = require("zod");
+
+ // src/responses/convert-to-openai-responses-messages.ts
+ var import_provider7 = require("@ai-sdk/provider");
+ var import_provider_utils7 = require("@ai-sdk/provider-utils");
+ function convertToOpenAIResponsesMessages({
+ prompt,
+ systemMessageMode
+ }) {
+ const messages = [];
+ const warnings = [];
+ for (const { role, content } of prompt) {
+ switch (role) {
+ case "system": {
+ switch (systemMessageMode) {
+ case "system": {
+ messages.push({ role: "system", content });
+ break;
+ }
+ case "developer": {
+ messages.push({ role: "developer", content });
+ break;
+ }
+ case "remove": {
+ warnings.push({
+ type: "other",
+ message: "system messages are removed for this model"
+ });
+ break;
+ }
+ default: {
+ const _exhaustiveCheck = systemMessageMode;
+ throw new Error(
+ `Unsupported system message mode: ${_exhaustiveCheck}`
+ );
+ }
+ }
+ break;
+ }
+ case "user": {
+ messages.push({
+ role: "user",
+ content: content.map((part) => {
+ var _a, _b, _c;
+ switch (part.type) {
+ case "text": {
+ return { type: "input_text", text: part.text };
+ }
+ case "image": {
+ return {
+ type: "input_image",
+ image_url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${(0, import_provider_utils7.convertUint8ArrayToBase64)(part.image)}`,
+ // OpenAI specific extension: image detail
+ detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
+ };
+ }
+ case "file": {
+ throw new import_provider7.UnsupportedFunctionalityError({
+ functionality: "Image content parts in user messages"
+ });
+ }
+ }
+ })
+ });
+ break;
+ }
+ case "assistant": {
+ for (const part of content) {
+ switch (part.type) {
+ case "text": {
+ messages.push({
+ role: "assistant",
+ content: [{ type: "output_text", text: part.text }]
+ });
+ break;
+ }
+ case "tool-call": {
+ messages.push({
+ type: "function_call",
+ call_id: part.toolCallId,
+ name: part.toolName,
+ arguments: JSON.stringify(part.args)
+ });
+ break;
+ }
+ }
+ }
+ break;
+ }
+ case "tool": {
+ for (const part of content) {
+ messages.push({
+ type: "function_call_output",
+ call_id: part.toolCallId,
+ output: JSON.stringify(part.result)
+ });
+ }
+ break;
+ }
+ default: {
+ const _exhaustiveCheck = role;
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+ }
+ }
+ }
+ return { messages, warnings };
+ }
+
+ // src/responses/map-openai-responses-finish-reason.ts
+ function mapOpenAIResponseFinishReason({
+ finishReason,
+ hasToolCalls
+ }) {
+ switch (finishReason) {
+ case void 0:
+ case null:
+ return hasToolCalls ? "tool-calls" : "stop";
+ case "max_output_tokens":
+ return "length";
+ case "content_filter":
+ return "content-filter";
+ default:
+ return hasToolCalls ? "tool-calls" : "unknown";
+ }
+ }
+
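Worked examples of the finish-reason mapping above; the results follow directly from the switch:

mapOpenAIResponseFinishReason({ finishReason: undefined, hasToolCalls: false }); // "stop"
mapOpenAIResponseFinishReason({ finishReason: undefined, hasToolCalls: true });  // "tool-calls"
mapOpenAIResponseFinishReason({ finishReason: "max_output_tokens", hasToolCalls: false }); // "length"
mapOpenAIResponseFinishReason({ finishReason: "content_filter", hasToolCalls: true }); // "content-filter"
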
+ // src/responses/openai-responses-prepare-tools.ts
+ var import_provider8 = require("@ai-sdk/provider");
+ function prepareResponsesTools({
+ mode,
+ strict
+ }) {
+ var _a;
+ const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+ const toolWarnings = [];
+ if (tools == null) {
+ return { tools: void 0, tool_choice: void 0, toolWarnings };
+ }
+ const toolChoice = mode.toolChoice;
+ const openaiTools2 = [];
+ for (const tool of tools) {
+ switch (tool.type) {
+ case "function":
+ openaiTools2.push({
+ type: "function",
+ name: tool.name,
+ description: tool.description,
+ parameters: tool.parameters,
+ strict: strict ? true : void 0
+ });
+ break;
+ case "provider-defined":
+ switch (tool.id) {
+ case "openai.web_search_preview":
+ openaiTools2.push({
+ type: "web_search_preview",
+ search_context_size: tool.args.searchContextSize,
+ user_location: tool.args.userLocation
+ });
+ break;
+ default:
+ toolWarnings.push({ type: "unsupported-tool", tool });
+ break;
+ }
+ break;
+ default:
+ toolWarnings.push({ type: "unsupported-tool", tool });
+ break;
+ }
+ }
+ if (toolChoice == null) {
+ return { tools: openaiTools2, tool_choice: void 0, toolWarnings };
+ }
+ const type = toolChoice.type;
+ switch (type) {
+ case "auto":
+ case "none":
+ case "required":
+ return { tools: openaiTools2, tool_choice: type, toolWarnings };
+ case "tool":
+ return {
+ tools: openaiTools2,
+ tool_choice: {
+ type: "function",
+ name: toolChoice.toolName
+ },
+ toolWarnings
+ };
+ default: {
+ const _exhaustiveCheck = type;
+ throw new import_provider8.UnsupportedFunctionalityError({
+ functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+ });
+ }
+ }
+ }
+
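A minimal sketch of prepareResponsesTools in action; the getWeather tool name, description, and schema are hypothetical placeholders:

const { tools, tool_choice, toolWarnings } = prepareResponsesTools({
  mode: {
    type: "regular",
    tools: [{ type: "function", name: "getWeather", description: "Get the weather", parameters: { type: "object", properties: {} } }],
    toolChoice: { type: "tool", toolName: "getWeather" },
  },
  strict: true,
});
// tools: [{ type: "function", name: "getWeather", description: "Get the weather", parameters: {...}, strict: true }]
// tool_choice: { type: "function", name: "getWeather" }
// toolWarnings: []
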
+ // src/responses/openai-responses-language-model.ts
+ var OpenAIResponsesLanguageModel = class {
+ constructor(modelId, config) {
+ this.specificationVersion = "v1";
+ this.defaultObjectGenerationMode = "json";
+ this.modelId = modelId;
+ this.config = config;
+ }
+ get provider() {
+ return this.config.provider;
+ }
+ getArgs({
+ mode,
+ maxTokens,
+ temperature,
+ stopSequences,
+ topP,
+ topK,
+ presencePenalty,
+ frequencyPenalty,
+ seed,
+ prompt,
+ providerMetadata,
+ responseFormat
+ }) {
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
+ const warnings = [];
+ const modelConfig = getResponsesModelConfig(this.modelId);
+ const type = mode.type;
+ if (topK != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "topK"
+ });
+ }
+ if (seed != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "seed"
+ });
+ }
+ if (presencePenalty != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "presencePenalty"
+ });
+ }
+ if (frequencyPenalty != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "frequencyPenalty"
+ });
+ }
+ if (stopSequences != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "stopSequences"
+ });
+ }
+ const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
+ prompt,
+ systemMessageMode: modelConfig.systemMessageMode
+ });
+ warnings.push(...messageWarnings);
+ const isStrictJsonSchema = (_b = (_a = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _a.strictJsonSchema) != null ? _b : true;
+ const baseArgs = {
+ model: this.modelId,
+ input: messages,
+ temperature,
+ top_p: topP,
+ max_output_tokens: maxTokens,
+ ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
+ text: {
+ format: responseFormat.schema != null ? {
+ type: "json_schema",
+ strict: isStrictJsonSchema,
+ name: (_c = responseFormat.name) != null ? _c : "response",
+ description: responseFormat.description,
+ schema: responseFormat.schema
+ } : { type: "json_object" }
+ }
+ },
+ // provider options:
+ metadata: (_d = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _d.metadata,
+ parallel_tool_calls: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.parallelToolCalls,
+ previous_response_id: (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.previousResponseId,
+ store: (_g = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _g.store,
+ user: (_h = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _h.user,
+ // model-specific settings:
+ ...modelConfig.isReasoningModel && ((_i = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _i.reasoningEffort) != null && {
+ reasoning: { effort: (_j = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _j.reasoningEffort }
+ },
+ ...modelConfig.requiredAutoTruncation && {
+ truncation: "auto"
+ }
+ };
+ if (modelConfig.isReasoningModel) {
+ if (baseArgs.temperature != null) {
+ baseArgs.temperature = void 0;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: "temperature is not supported for reasoning models"
+ });
+ }
+ if (baseArgs.top_p != null) {
+ baseArgs.top_p = void 0;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "topP",
+ details: "topP is not supported for reasoning models"
+ });
+ }
+ }
+ switch (type) {
+ case "regular": {
+ const { tools, tool_choice, toolWarnings } = prepareResponsesTools({
+ mode,
+ strict: true
+ });
+ return {
+ args: {
+ ...baseArgs,
+ tools,
+ tool_choice
+ },
+ warnings: [...warnings, ...toolWarnings]
+ };
+ }
+ case "object-json": {
+ return {
+ args: {
+ ...baseArgs,
+ text: {
+ format: mode.schema != null ? {
+ type: "json_schema",
+ strict: isStrictJsonSchema,
+ name: (_k = mode.name) != null ? _k : "response",
+ description: mode.description,
+ schema: mode.schema
+ } : { type: "json_object" }
+ }
+ },
+ warnings
+ };
+ }
+ case "object-tool": {
+ return {
+ args: {
+ ...baseArgs,
+ tool_choice: { type: "function", name: mode.tool.name },
+ tools: [
+ {
+ type: "function",
+ name: mode.tool.name,
+ description: mode.tool.description,
+ parameters: mode.tool.parameters,
+ strict: isStrictJsonSchema
+ }
+ ]
+ },
+ warnings
+ };
+ }
+ default: {
+ const _exhaustiveCheck = type;
+ throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
+ }
+ }
+ }
+ async doGenerate(options) {
+ var _a, _b, _c, _d, _e;
+ const { args: body, warnings } = this.getArgs(options);
+ const {
+ responseHeaders,
+ value: response,
+ rawValue: rawResponse
+ } = await (0, import_provider_utils8.postJsonToApi)({
+ url: this.config.url({
+ path: "/responses",
+ modelId: this.modelId
+ }),
+ headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
+ body,
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
+ import_zod6.z.object({
+ id: import_zod6.z.string(),
+ created_at: import_zod6.z.number(),
+ model: import_zod6.z.string(),
+ output: import_zod6.z.array(
+ import_zod6.z.discriminatedUnion("type", [
+ import_zod6.z.object({
+ type: import_zod6.z.literal("message"),
+ role: import_zod6.z.literal("assistant"),
+ content: import_zod6.z.array(
+ import_zod6.z.object({
+ type: import_zod6.z.literal("output_text"),
+ text: import_zod6.z.string(),
+ annotations: import_zod6.z.array(
+ import_zod6.z.object({
+ type: import_zod6.z.literal("url_citation"),
+ start_index: import_zod6.z.number(),
+ end_index: import_zod6.z.number(),
+ url: import_zod6.z.string(),
+ title: import_zod6.z.string()
+ })
+ )
+ })
+ )
+ }),
+ import_zod6.z.object({
+ type: import_zod6.z.literal("function_call"),
+ call_id: import_zod6.z.string(),
+ name: import_zod6.z.string(),
+ arguments: import_zod6.z.string()
+ }),
+ import_zod6.z.object({
+ type: import_zod6.z.literal("web_search_call")
+ }),
+ import_zod6.z.object({
+ type: import_zod6.z.literal("computer_call")
+ }),
+ import_zod6.z.object({
+ type: import_zod6.z.literal("reasoning")
+ })
+ ])
+ ),
+ incomplete_details: import_zod6.z.object({ reason: import_zod6.z.string() }).nullable(),
+ usage: usageSchema
+ })
+ ),
+ abortSignal: options.abortSignal,
+ fetch: this.config.fetch
+ });
+ const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
+ const toolCalls = response.output.filter((output) => output.type === "function_call").map((output) => ({
+ toolCallType: "function",
+ toolCallId: output.call_id,
+ toolName: output.name,
+ args: output.arguments
+ }));
+ return {
+ text: outputTextElements.map((content) => content.text).join("\n"),
+ sources: outputTextElements.flatMap(
+ (content) => content.annotations.map((annotation) => {
+ var _a2, _b2, _c2;
+ return {
+ sourceType: "url",
+ id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : (0, import_provider_utils8.generateId)(),
+ url: annotation.url,
+ title: annotation.title
+ };
+ })
+ ),
+ finishReason: mapOpenAIResponseFinishReason({
+ finishReason: (_a = response.incomplete_details) == null ? void 0 : _a.reason,
+ hasToolCalls: toolCalls.length > 0
+ }),
+ toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
+ usage: {
+ promptTokens: response.usage.input_tokens,
+ completionTokens: response.usage.output_tokens
+ },
+ rawCall: {
+ rawPrompt: void 0,
+ rawSettings: {}
+ },
+ rawResponse: {
+ headers: responseHeaders,
+ body: rawResponse
+ },
+ request: {
+ body: JSON.stringify(body)
+ },
+ response: {
+ id: response.id,
+ timestamp: new Date(response.created_at * 1e3),
+ modelId: response.model
+ },
+ providerMetadata: {
+ openai: {
+ responseId: response.id,
+ cachedPromptTokens: (_c = (_b = response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : null,
+ reasoningTokens: (_e = (_d = response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : null
+ }
+ },
+ warnings
+ };
+ }
+ async doStream(options) {
+ const { args: body, warnings } = this.getArgs(options);
+ const { responseHeaders, value: response } = await (0, import_provider_utils8.postJsonToApi)({
+ url: this.config.url({
+ path: "/responses",
+ modelId: this.modelId
+ }),
+ headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
+ body: {
+ ...body,
+ stream: true
+ },
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: (0, import_provider_utils8.createEventSourceResponseHandler)(
+ openaiResponsesChunkSchema
+ ),
+ abortSignal: options.abortSignal,
+ fetch: this.config.fetch
+ });
+ const self = this;
+ let finishReason = "unknown";
+ let promptTokens = NaN;
+ let completionTokens = NaN;
+ let cachedPromptTokens = null;
+ let reasoningTokens = null;
+ let responseId = null;
+ const ongoingToolCalls = {};
+ let hasToolCalls = false;
+ return {
+ stream: response.pipeThrough(
+ new TransformStream({
+ transform(chunk, controller) {
+ var _a, _b, _c, _d, _e, _f, _g, _h;
+ if (!chunk.success) {
+ finishReason = "error";
+ controller.enqueue({ type: "error", error: chunk.error });
+ return;
+ }
+ const value = chunk.value;
+ if (isResponseOutputItemAddedChunk(value)) {
+ if (value.item.type === "function_call") {
+ ongoingToolCalls[value.output_index] = {
+ toolName: value.item.name,
+ toolCallId: value.item.call_id
+ };
+ controller.enqueue({
+ type: "tool-call-delta",
+ toolCallType: "function",
+ toolCallId: value.item.call_id,
+ toolName: value.item.name,
+ argsTextDelta: value.item.arguments
+ });
+ }
+ } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
+ const toolCall = ongoingToolCalls[value.output_index];
+ if (toolCall != null) {
+ controller.enqueue({
+ type: "tool-call-delta",
+ toolCallType: "function",
+ toolCallId: toolCall.toolCallId,
+ toolName: toolCall.toolName,
+ argsTextDelta: value.delta
+ });
+ }
+ } else if (isResponseCreatedChunk(value)) {
+ responseId = value.response.id;
+ controller.enqueue({
+ type: "response-metadata",
+ id: value.response.id,
+ timestamp: new Date(value.response.created_at * 1e3),
+ modelId: value.response.model
+ });
+ } else if (isTextDeltaChunk(value)) {
+ controller.enqueue({
+ type: "text-delta",
+ textDelta: value.delta
+ });
+ } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
+ ongoingToolCalls[value.output_index] = void 0;
+ hasToolCalls = true;
+ controller.enqueue({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: value.item.call_id,
+ toolName: value.item.name,
+ args: value.item.arguments
+ });
+ } else if (isResponseFinishedChunk(value)) {
+ finishReason = mapOpenAIResponseFinishReason({
+ finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
+ hasToolCalls
+ });
+ promptTokens = value.response.usage.input_tokens;
+ completionTokens = value.response.usage.output_tokens;
+ cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
+ reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
+ } else if (isResponseAnnotationAddedChunk(value)) {
+ controller.enqueue({
+ type: "source",
+ source: {
+ sourceType: "url",
+ id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils8.generateId)(),
+ url: value.annotation.url,
+ title: value.annotation.title
+ }
+ });
+ }
+ },
+ flush(controller) {
+ controller.enqueue({
+ type: "finish",
+ finishReason,
+ usage: { promptTokens, completionTokens },
+ ...(cachedPromptTokens != null || reasoningTokens != null) && {
+ providerMetadata: {
+ openai: {
+ responseId,
+ cachedPromptTokens,
+ reasoningTokens
+ }
+ }
+ }
+ });
+ }
+ })
+ ),
+ rawCall: {
+ rawPrompt: void 0,
+ rawSettings: {}
+ },
+ rawResponse: { headers: responseHeaders },
+ request: { body: JSON.stringify(body) },
+ warnings
+ };
+ }
+ };
+ var usageSchema = import_zod6.z.object({
+ input_tokens: import_zod6.z.number(),
+ input_tokens_details: import_zod6.z.object({ cached_tokens: import_zod6.z.number().nullish() }).nullish(),
+ output_tokens: import_zod6.z.number(),
+ output_tokens_details: import_zod6.z.object({ reasoning_tokens: import_zod6.z.number().nullish() }).nullish()
+ });
+ var textDeltaChunkSchema = import_zod6.z.object({
+ type: import_zod6.z.literal("response.output_text.delta"),
+ delta: import_zod6.z.string()
+ });
+ var responseFinishedChunkSchema = import_zod6.z.object({
+ type: import_zod6.z.enum(["response.completed", "response.incomplete"]),
+ response: import_zod6.z.object({
+ incomplete_details: import_zod6.z.object({ reason: import_zod6.z.string() }).nullish(),
+ usage: usageSchema
+ })
+ });
+ var responseCreatedChunkSchema = import_zod6.z.object({
+ type: import_zod6.z.literal("response.created"),
+ response: import_zod6.z.object({
+ id: import_zod6.z.string(),
+ created_at: import_zod6.z.number(),
+ model: import_zod6.z.string()
+ })
+ });
+ var responseOutputItemDoneSchema = import_zod6.z.object({
+ type: import_zod6.z.literal("response.output_item.done"),
+ output_index: import_zod6.z.number(),
+ item: import_zod6.z.discriminatedUnion("type", [
+ import_zod6.z.object({
+ type: import_zod6.z.literal("message")
+ }),
+ import_zod6.z.object({
+ type: import_zod6.z.literal("function_call"),
+ id: import_zod6.z.string(),
+ call_id: import_zod6.z.string(),
+ name: import_zod6.z.string(),
+ arguments: import_zod6.z.string(),
+ status: import_zod6.z.literal("completed")
+ })
+ ])
+ });
+ var responseFunctionCallArgumentsDeltaSchema = import_zod6.z.object({
+ type: import_zod6.z.literal("response.function_call_arguments.delta"),
+ item_id: import_zod6.z.string(),
+ output_index: import_zod6.z.number(),
+ delta: import_zod6.z.string()
+ });
+ var responseOutputItemAddedSchema = import_zod6.z.object({
+ type: import_zod6.z.literal("response.output_item.added"),
+ output_index: import_zod6.z.number(),
+ item: import_zod6.z.discriminatedUnion("type", [
+ import_zod6.z.object({
+ type: import_zod6.z.literal("message")
+ }),
+ import_zod6.z.object({
+ type: import_zod6.z.literal("function_call"),
+ id: import_zod6.z.string(),
+ call_id: import_zod6.z.string(),
+ name: import_zod6.z.string(),
+ arguments: import_zod6.z.string()
+ })
+ ])
+ });
+ var responseAnnotationAddedSchema = import_zod6.z.object({
+ type: import_zod6.z.literal("response.output_text.annotation.added"),
+ annotation: import_zod6.z.object({
+ type: import_zod6.z.literal("url_citation"),
+ url: import_zod6.z.string(),
+ title: import_zod6.z.string()
+ })
+ });
+ var openaiResponsesChunkSchema = import_zod6.z.union([
+ textDeltaChunkSchema,
+ responseFinishedChunkSchema,
+ responseCreatedChunkSchema,
+ responseOutputItemDoneSchema,
+ responseFunctionCallArgumentsDeltaSchema,
+ responseOutputItemAddedSchema,
+ responseAnnotationAddedSchema,
+ import_zod6.z.object({ type: import_zod6.z.string() }).passthrough()
+ // fallback for unknown chunks
+ ]);
+ function isTextDeltaChunk(chunk) {
+ return chunk.type === "response.output_text.delta";
+ }
+ function isResponseOutputItemDoneChunk(chunk) {
+ return chunk.type === "response.output_item.done";
+ }
+ function isResponseFinishedChunk(chunk) {
+ return chunk.type === "response.completed" || chunk.type === "response.incomplete";
+ }
+ function isResponseCreatedChunk(chunk) {
+ return chunk.type === "response.created";
+ }
+ function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
+ return chunk.type === "response.function_call_arguments.delta";
+ }
+ function isResponseOutputItemAddedChunk(chunk) {
+ return chunk.type === "response.output_item.added";
+ }
+ function isResponseAnnotationAddedChunk(chunk) {
+ return chunk.type === "response.output_text.annotation.added";
+ }
+ function getResponsesModelConfig(modelId) {
+ if (modelId.startsWith("o")) {
+ if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
+ return {
+ isReasoningModel: true,
+ systemMessageMode: "remove",
+ requiredAutoTruncation: false
+ };
+ }
+ return {
+ isReasoningModel: true,
+ systemMessageMode: "developer",
+ requiredAutoTruncation: false
+ };
+ }
+ return {
+ isReasoningModel: false,
+ systemMessageMode: "system",
+ requiredAutoTruncation: false
+ };
+ }
+
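The model-config mapping above resolves as follows; the example model ids are illustrative, and the results follow directly from the branches:

getResponsesModelConfig("o1-mini"); // { isReasoningModel: true, systemMessageMode: "remove", requiredAutoTruncation: false }
getResponsesModelConfig("o3-mini"); // { isReasoningModel: true, systemMessageMode: "developer", requiredAutoTruncation: false }
getResponsesModelConfig("gpt-4o");  // { isReasoningModel: false, systemMessageMode: "system", requiredAutoTruncation: false }
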
+ // src/openai-tools.ts
+ var import_zod7 = require("zod");
+ var WebSearchPreviewParameters = import_zod7.z.object({});
+ function webSearchPreviewTool({
+ searchContextSize,
+ userLocation
+ } = {}) {
+ return {
+ type: "provider-defined",
+ id: "openai.web_search_preview",
+ args: {
+ searchContextSize,
+ userLocation
+ },
+ parameters: WebSearchPreviewParameters
+ };
+ }
+ var openaiTools = {
+ webSearchPreview: webSearchPreviewTool
+ };
+
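webSearchPreviewTool builds the provider-defined tool definition that prepareResponsesTools recognizes by its "openai.web_search_preview" id. A sketch of the resulting shape:

const tool = webSearchPreviewTool({ searchContextSize: "high" });
// {
//   type: "provider-defined",
//   id: "openai.web_search_preview",
//   args: { searchContextSize: "high", userLocation: undefined },
//   parameters: WebSearchPreviewParameters
// }
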
  // src/openai-provider.ts
  function createOpenAI(options = {}) {
  var _a, _b, _c;
- const baseURL = (_a = (0, import_provider_utils7.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://api.openai.com/v1";
+ const baseURL = (_a = (0, import_provider_utils9.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://api.openai.com/v1";
  const compatibility = (_b = options.compatibility) != null ? _b : "compatible";
  const providerName = (_c = options.name) != null ? _c : "openai";
  const getHeaders = () => ({
- Authorization: `Bearer ${(0, import_provider_utils7.loadApiKey)({
+ Authorization: `Bearer ${(0, import_provider_utils9.loadApiKey)({
  apiKey: options.apiKey,
  environmentVariableName: "OPENAI_API_KEY",
  description: "OpenAI"
@@ -1645,17 +2428,27 @@ function createOpenAI(options = {}) {
  }
  return createChatModel(modelId, settings);
  };
+ const createResponsesModel = (modelId) => {
+ return new OpenAIResponsesLanguageModel(modelId, {
+ provider: `${providerName}.responses`,
+ url: ({ path }) => `${baseURL}${path}`,
+ headers: getHeaders,
+ fetch: options.fetch
+ });
+ };
  const provider = function(modelId, settings) {
  return createLanguageModel(modelId, settings);
  };
  provider.languageModel = createLanguageModel;
  provider.chat = createChatModel;
  provider.completion = createCompletionModel;
+ provider.responses = createResponsesModel;
  provider.embedding = createEmbeddingModel;
  provider.textEmbedding = createEmbeddingModel;
  provider.textEmbeddingModel = createEmbeddingModel;
  provider.image = createImageModel;
  provider.imageModel = createImageModel;
+ provider.tools = openaiTools;
  return provider;
  }
  var openai = createOpenAI({
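
Taken together, these additions expose OpenAI's Responses API through the provider as openai.responses(modelId) and ship the web search tool under openai.tools. A hedged end-to-end sketch, assuming the ai package's generateText is used alongside this provider (that call site is outside this diff, and the model id is illustrative):

const { openai } = require("@ai-sdk/openai");
const { generateText } = require("ai");

async function main() {
  const result = await generateText({
    model: openai.responses("gpt-4o-mini"),
    prompt: "What happened in tech news today?",
    tools: {
      web_search_preview: openai.tools.webSearchPreview({ searchContextSize: "medium" }),
    },
  });
  // result.text joins the output_text parts; result.sources carries the
  // url_citation annotations mapped to { sourceType: "url", id, url, title }
  console.log(result.text, result.sources);
}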