@ai-sdk/openai 1.2.0 → 1.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
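The headline change in this range is a new OpenAI Responses API language model (`OpenAIResponsesLanguageModel`) added to the internal bundle; the existing chat and completion models also start returning the raw response body, and system-message warnings move into the message conversion step. For orientation, a minimal usage sketch, assuming the public provider wires the new model up as `openai.responses(...)` and exposes the built-in web search tool as `openai.tools.webSearchPreview(...)` (neither factory appears in this bundle excerpt, so both names are assumptions):

import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

const { text, sources } = await generateText({
  // assumed factory; constructs the OpenAIResponsesLanguageModel added below
  model: openai.responses("gpt-4o-mini"),
  prompt: "What happened in tech news today?",
  tools: {
    // assumed helper for the "openai.web_search_preview" provider-defined tool
    web_search_preview: openai.tools.webSearchPreview({ searchContextSize: "medium" }),
  },
});
// `sources` carries the url_citation annotations surfaced by doGenerate below.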
@@ -24,6 +24,7 @@ __export(internal_exports, {
   OpenAICompletionLanguageModel: () => OpenAICompletionLanguageModel,
   OpenAIEmbeddingModel: () => OpenAIEmbeddingModel,
   OpenAIImageModel: () => OpenAIImageModel,
+  OpenAIResponsesLanguageModel: () => OpenAIResponsesLanguageModel,
   modelMaxImagesPerCall: () => modelMaxImagesPerCall
 });
 module.exports = __toCommonJS(internal_exports);
@@ -42,6 +43,7 @@ function convertToOpenAIChatMessages({
   systemMessageMode = "system"
 }) {
   const messages = [];
+  const warnings = [];
   for (const { role, content } of prompt) {
     switch (role) {
       case "system": {
@@ -55,6 +57,10 @@ function convertToOpenAIChatMessages({
             break;
           }
           case "remove": {
+            warnings.push({
+              type: "other",
+              message: "system messages are removed for this model"
+            });
             break;
           }
           default: {
@@ -195,7 +201,7 @@ function convertToOpenAIChatMessages({
       }
     }
   }
-  return messages;
+  return { messages, warnings };
 }
 
 // src/map-openai-chat-logprobs.ts
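The practical effect of this refactor: `convertToOpenAIChatMessages` now reports message-level warnings itself instead of leaving the caller to detect removed system messages. A small sketch of the new contract (input shapes follow the AI SDK prompt format; output abbreviated):

const { messages, warnings } = convertToOpenAIChatMessages({
  prompt: [
    { role: "system", content: "Be terse." },
    { role: "user", content: [{ type: "text", text: "Hi" }] },
  ],
  systemMessageMode: "remove", // e.g. what getSystemMessageMode returns for o1-mini
});
// messages: the system message is dropped, only the user message remains
// warnings: [{ type: "other", message: "system messages are removed for this model" }]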
@@ -426,12 +432,14 @@ var OpenAIChatLanguageModel = class {
         functionality: "structuredOutputs with useLegacyFunctionCalling"
       });
     }
-    if (getSystemMessageMode(this.modelId) === "remove" && prompt.some((message) => message.role === "system")) {
-      warnings.push({
-        type: "other",
-        message: "system messages are removed for this model"
-      });
-    }
+    const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
+      {
+        prompt,
+        useLegacyFunctionCalling,
+        systemMessageMode: getSystemMessageMode(this.modelId)
+      }
+    );
+    warnings.push(...messageWarnings);
     const baseArgs = {
       // model id:
       model: this.modelId,
@@ -466,11 +474,7 @@ var OpenAIChatLanguageModel = class {
       prediction: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.prediction,
       reasoning_effort: (_g = (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
       // messages:
-      messages: convertToOpenAIChatMessages({
-        prompt,
-        useLegacyFunctionCalling,
-        systemMessageMode: getSystemMessageMode(this.modelId)
-      })
+      messages
     };
     if (isReasoningModel(this.modelId)) {
       if (baseArgs.temperature != null) {
@@ -612,7 +616,11 @@ var OpenAIChatLanguageModel = class {
   async doGenerate(options) {
     var _a, _b, _c, _d, _e, _f, _g, _h;
     const { args: body, warnings } = this.getArgs(options);
-    const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
+    const {
+      responseHeaders,
+      value: response,
+      rawValue: rawResponse
+    } = await (0, import_provider_utils3.postJsonToApi)({
       url: this.config.url({
         path: "/chat/completions",
         modelId: this.modelId
@@ -667,7 +675,7 @@ var OpenAIChatLanguageModel = class {
         completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
       },
       rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders },
+      rawResponse: { headers: responseHeaders, body: rawResponse },
       request: { body: JSON.stringify(body) },
       response: getResponseMetadata(response),
       warnings,
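Together with the `rawValue` destructuring above, `doGenerate` now surfaces the parsed response body, not just the headers. A sketch at the LanguageModelV1 level (field names taken from the diff):

const result = await model.doGenerate(callOptions);
result.rawResponse?.headers; // HTTP response headers (unchanged)
result.rawResponse?.body;    // new in this range: the raw JSON body returned by the API

The completion model receives the same treatment in the hunks that follow.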
@@ -1269,7 +1277,11 @@ var OpenAICompletionLanguageModel = class {
   }
   async doGenerate(options) {
     const { args, warnings } = this.getArgs(options);
-    const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
+    const {
+      responseHeaders,
+      value: response,
+      rawValue: rawResponse
+    } = await (0, import_provider_utils4.postJsonToApi)({
       url: this.config.url({
         path: "/completions",
         modelId: this.modelId
@@ -1294,7 +1306,7 @@ var OpenAICompletionLanguageModel = class {
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
       rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders },
+      rawResponse: { headers: responseHeaders, body: rawResponse },
      response: getResponseMetadata(response),
       warnings,
       request: { body: JSON.stringify(args) }
@@ -1588,12 +1600,764 @@ var OpenAIImageModel = class {
 var openaiImageResponseSchema = import_zod5.z.object({
   data: import_zod5.z.array(import_zod5.z.object({ b64_json: import_zod5.z.string() }))
 });
+
+// src/responses/openai-responses-language-model.ts
+var import_provider_utils8 = require("@ai-sdk/provider-utils");
+var import_zod6 = require("zod");
+
+// src/responses/convert-to-openai-responses-messages.ts
+var import_provider7 = require("@ai-sdk/provider");
+var import_provider_utils7 = require("@ai-sdk/provider-utils");
+function convertToOpenAIResponsesMessages({
+  prompt,
+  systemMessageMode
+}) {
+  const messages = [];
+  const warnings = [];
+  for (const { role, content } of prompt) {
+    switch (role) {
+      case "system": {
+        switch (systemMessageMode) {
+          case "system": {
+            messages.push({ role: "system", content });
+            break;
+          }
+          case "developer": {
+            messages.push({ role: "developer", content });
+            break;
+          }
+          case "remove": {
+            warnings.push({
+              type: "other",
+              message: "system messages are removed for this model"
+            });
+            break;
+          }
+          default: {
+            const _exhaustiveCheck = systemMessageMode;
+            throw new Error(
+              `Unsupported system message mode: ${_exhaustiveCheck}`
+            );
+          }
+        }
+        break;
+      }
+      case "user": {
+        messages.push({
+          role: "user",
+          content: content.map((part) => {
+            var _a, _b, _c;
+            switch (part.type) {
+              case "text": {
+                return { type: "input_text", text: part.text };
+              }
+              case "image": {
+                return {
+                  type: "input_image",
+                  image_url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${(0, import_provider_utils7.convertUint8ArrayToBase64)(part.image)}`,
+                  // OpenAI specific extension: image detail
+                  detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
+                };
+              }
+              case "file": {
+                throw new import_provider7.UnsupportedFunctionalityError({
+                  functionality: "Image content parts in user messages"
+                });
+              }
+            }
+          })
+        });
+        break;
+      }
+      case "assistant": {
+        for (const part of content) {
+          switch (part.type) {
+            case "text": {
+              messages.push({
+                role: "assistant",
+                content: [{ type: "output_text", text: part.text }]
+              });
+              break;
+            }
+            case "tool-call": {
+              messages.push({
+                type: "function_call",
+                call_id: part.toolCallId,
+                name: part.toolName,
+                arguments: JSON.stringify(part.args)
+              });
+              break;
+            }
+          }
+        }
+        break;
+      }
+      case "tool": {
+        for (const part of content) {
+          messages.push({
+            type: "function_call_output",
+            call_id: part.toolCallId,
+            output: JSON.stringify(part.result)
+          });
+        }
+        break;
+      }
+      default: {
+        const _exhaustiveCheck = role;
+        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+      }
+    }
+  }
+  return { messages, warnings };
+}
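A sketch of what this conversion produces for a tool round-trip; note that assistant tool calls and tool results become top-level function_call / function_call_output items in the Responses API `input`, rather than nested message content:

const { messages } = convertToOpenAIResponsesMessages({
  prompt: [
    {
      role: "assistant",
      content: [{ type: "tool-call", toolCallId: "call_1", toolName: "weather", args: { city: "Berlin" } }],
    },
    {
      role: "tool",
      content: [{ type: "tool-result", toolCallId: "call_1", toolName: "weather", result: { tempC: 18 } }],
    },
  ],
  systemMessageMode: "system",
});
// messages:
// [
//   { type: "function_call", call_id: "call_1", name: "weather", arguments: '{"city":"Berlin"}' },
//   { type: "function_call_output", call_id: "call_1", output: '{"tempC":18}' }
// ]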
+
+// src/responses/map-openai-responses-finish-reason.ts
+function mapOpenAIResponseFinishReason({
+  finishReason,
+  hasToolCalls
+}) {
+  switch (finishReason) {
+    case void 0:
+    case null:
+      return hasToolCalls ? "tool-calls" : "stop";
+    case "max_output_tokens":
+      return "length";
+    case "content_filter":
+      return "content-filter";
+    default:
+      return hasToolCalls ? "tool-calls" : "unknown";
+  }
+}
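Spelled out, the mapping above from the Responses API's `incomplete_details.reason` to SDK finish reasons:

mapOpenAIResponseFinishReason({ finishReason: undefined, hasToolCalls: false }); // "stop"
mapOpenAIResponseFinishReason({ finishReason: undefined, hasToolCalls: true });  // "tool-calls"
mapOpenAIResponseFinishReason({ finishReason: "max_output_tokens", hasToolCalls: false }); // "length"
mapOpenAIResponseFinishReason({ finishReason: "content_filter", hasToolCalls: false });    // "content-filter"
mapOpenAIResponseFinishReason({ finishReason: "anything_else", hasToolCalls: false });     // "unknown"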
+
+// src/responses/openai-responses-prepare-tools.ts
+var import_provider8 = require("@ai-sdk/provider");
+function prepareResponsesTools({
+  mode,
+  strict
+}) {
+  var _a;
+  const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+  const toolWarnings = [];
+  if (tools == null) {
+    return { tools: void 0, tool_choice: void 0, toolWarnings };
+  }
+  const toolChoice = mode.toolChoice;
+  const openaiTools = [];
+  for (const tool of tools) {
+    switch (tool.type) {
+      case "function":
+        openaiTools.push({
+          type: "function",
+          name: tool.name,
+          description: tool.description,
+          parameters: tool.parameters,
+          strict: strict ? true : void 0
+        });
+        break;
+      case "provider-defined":
+        switch (tool.id) {
+          case "openai.web_search_preview":
+            openaiTools.push({
+              type: "web_search_preview",
+              search_context_size: tool.args.searchContextSize,
+              user_location: tool.args.userLocation
+            });
+            break;
+          default:
+            toolWarnings.push({ type: "unsupported-tool", tool });
+            break;
+        }
+        break;
+      default:
+        toolWarnings.push({ type: "unsupported-tool", tool });
+        break;
+    }
+  }
+  if (toolChoice == null) {
+    return { tools: openaiTools, tool_choice: void 0, toolWarnings };
+  }
+  const type = toolChoice.type;
+  switch (type) {
+    case "auto":
+    case "none":
+    case "required":
+      return { tools: openaiTools, tool_choice: type, toolWarnings };
+    case "tool":
+      return {
+        tools: openaiTools,
+        tool_choice: {
+          type: "function",
+          name: toolChoice.toolName
+        },
+        toolWarnings
+      };
+    default: {
+      const _exhaustiveCheck = type;
+      throw new import_provider8.UnsupportedFunctionalityError({
+        functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+      });
+    }
+  }
+}
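A sketch of the translation this performs for `mode.type === "regular"`: the function tool keeps its JSON-schema `parameters`, while the provider-defined web search tool is flattened into OpenAI's snake_case shape:

const { tools, tool_choice } = prepareResponsesTools({
  mode: {
    type: "regular",
    tools: [
      {
        type: "function",
        name: "weather",
        description: "Get the weather for a city",
        parameters: { type: "object", properties: { city: { type: "string" } } },
      },
      {
        type: "provider-defined",
        id: "openai.web_search_preview",
        name: "web_search_preview",
        args: { searchContextSize: "medium" },
      },
    ],
    toolChoice: { type: "tool", toolName: "weather" },
  },
  strict: true,
});
// tools[0]:   { type: "function", name: "weather", description: "...", parameters: {...}, strict: true }
// tools[1]:   { type: "web_search_preview", search_context_size: "medium", user_location: undefined }
// tool_choice: { type: "function", name: "weather" }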
+
+// src/responses/openai-responses-language-model.ts
+var OpenAIResponsesLanguageModel = class {
+  constructor(modelId, config) {
+    this.specificationVersion = "v1";
+    this.defaultObjectGenerationMode = "json";
+    this.modelId = modelId;
+    this.config = config;
+  }
+  get provider() {
+    return this.config.provider;
+  }
+  getArgs({
+    mode,
+    maxTokens,
+    temperature,
+    stopSequences,
+    topP,
+    topK,
+    presencePenalty,
+    frequencyPenalty,
+    seed,
+    prompt,
+    providerMetadata,
+    responseFormat
+  }) {
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
+    const warnings = [];
+    const modelConfig = getResponsesModelConfig(this.modelId);
+    const type = mode.type;
+    if (topK != null) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "topK"
+      });
+    }
+    if (seed != null) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "seed"
+      });
+    }
+    if (presencePenalty != null) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "presencePenalty"
+      });
+    }
+    if (frequencyPenalty != null) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "frequencyPenalty"
+      });
+    }
+    if (stopSequences != null) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "stopSequences"
+      });
+    }
+    const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
+      prompt,
+      systemMessageMode: modelConfig.systemMessageMode
+    });
+    warnings.push(...messageWarnings);
+    const isStrictJsonSchema = (_b = (_a = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _a.strictJsonSchema) != null ? _b : true;
+    const baseArgs = {
+      model: this.modelId,
+      input: messages,
+      temperature,
+      top_p: topP,
+      max_output_tokens: maxTokens,
+      ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
+        text: {
+          format: responseFormat.schema != null ? {
+            type: "json_schema",
+            strict: isStrictJsonSchema,
+            name: (_c = responseFormat.name) != null ? _c : "response",
+            description: responseFormat.description,
+            schema: responseFormat.schema
+          } : { type: "json_object" }
+        }
+      },
+      // provider options:
+      metadata: (_d = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _d.metadata,
+      parallel_tool_calls: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.parallelToolCalls,
+      previous_response_id: (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.previousResponseId,
+      store: (_g = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _g.store,
+      user: (_h = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _h.user,
+      // model-specific settings:
+      ...modelConfig.isReasoningModel && ((_i = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _i.reasoningEffort) != null && {
+        reasoning: { effort: (_j = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _j.reasoningEffort }
+      },
+      ...modelConfig.requiredAutoTruncation && {
+        truncation: "auto"
+      }
+    };
+    if (modelConfig.isReasoningModel) {
+      if (baseArgs.temperature != null) {
+        baseArgs.temperature = void 0;
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "temperature",
+          details: "temperature is not supported for reasoning models"
+        });
+      }
+      if (baseArgs.top_p != null) {
+        baseArgs.top_p = void 0;
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "topP",
+          details: "topP is not supported for reasoning models"
+        });
+      }
+    }
+    switch (type) {
+      case "regular": {
+        const { tools, tool_choice, toolWarnings } = prepareResponsesTools({
+          mode,
+          strict: true
+        });
+        return {
+          args: {
+            ...baseArgs,
+            tools,
+            tool_choice
+          },
+          warnings: [...warnings, ...toolWarnings]
+        };
+      }
+      case "object-json": {
+        return {
+          args: {
+            ...baseArgs,
+            text: {
+              format: mode.schema != null ? {
+                type: "json_schema",
+                strict: isStrictJsonSchema,
+                name: (_k = mode.name) != null ? _k : "response",
+                description: mode.description,
+                schema: mode.schema
+              } : { type: "json_object" }
+            }
+          },
+          warnings
+        };
+      }
+      case "object-tool": {
+        return {
+          args: {
+            ...baseArgs,
+            tool_choice: { type: "function", name: mode.tool.name },
+            tools: [
+              {
+                type: "function",
+                name: mode.tool.name,
+                description: mode.tool.description,
+                parameters: mode.tool.parameters,
+                strict: isStrictJsonSchema
+              }
+            ]
+          },
+          warnings
+        };
+      }
+      default: {
+        const _exhaustiveCheck = type;
+        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
+      }
+    }
+  }
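The `providerMetadata.openai` keys read in getArgs map onto Responses API request fields. A sketch of how a caller might set them; the option name at the `generateText` level depends on the SDK version (`providerOptions` vs. `experimental_providerMetadata`), so that part is an assumption:

await generateText({
  model: openai.responses("o3-mini"), // assumed factory, as above
  prompt: "Summarize the report.",
  providerOptions: {
    openai: {
      previousResponseId: "resp_123", // -> previous_response_id (chain server-side state)
      store: false,                   // -> store
      reasoningEffort: "low",         // -> reasoning.effort (reasoning models only)
      strictJsonSchema: false,        // relaxes strict mode for json_schema outputs
      parallelToolCalls: false,       // -> parallel_tool_calls
    },
  },
});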
+  async doGenerate(options) {
+    var _a, _b, _c, _d, _e;
+    const { args: body, warnings } = this.getArgs(options);
+    const {
+      responseHeaders,
+      value: response,
+      rawValue: rawResponse
+    } = await (0, import_provider_utils8.postJsonToApi)({
+      url: this.config.url({
+        path: "/responses",
+        modelId: this.modelId
+      }),
+      headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
+      body,
+      failedResponseHandler: openaiFailedResponseHandler,
+      successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
+        import_zod6.z.object({
+          id: import_zod6.z.string(),
+          created_at: import_zod6.z.number(),
+          model: import_zod6.z.string(),
+          output: import_zod6.z.array(
+            import_zod6.z.discriminatedUnion("type", [
+              import_zod6.z.object({
+                type: import_zod6.z.literal("message"),
+                role: import_zod6.z.literal("assistant"),
+                content: import_zod6.z.array(
+                  import_zod6.z.object({
+                    type: import_zod6.z.literal("output_text"),
+                    text: import_zod6.z.string(),
+                    annotations: import_zod6.z.array(
+                      import_zod6.z.object({
+                        type: import_zod6.z.literal("url_citation"),
+                        start_index: import_zod6.z.number(),
+                        end_index: import_zod6.z.number(),
+                        url: import_zod6.z.string(),
+                        title: import_zod6.z.string()
+                      })
+                    )
+                  })
+                )
+              }),
+              import_zod6.z.object({
+                type: import_zod6.z.literal("function_call"),
+                call_id: import_zod6.z.string(),
+                name: import_zod6.z.string(),
+                arguments: import_zod6.z.string()
+              }),
+              import_zod6.z.object({
+                type: import_zod6.z.literal("web_search_call")
+              }),
+              import_zod6.z.object({
+                type: import_zod6.z.literal("computer_call")
+              }),
+              import_zod6.z.object({
+                type: import_zod6.z.literal("reasoning")
+              })
+            ])
+          ),
+          incomplete_details: import_zod6.z.object({ reason: import_zod6.z.string() }).nullable(),
+          usage: usageSchema
+        })
+      ),
+      abortSignal: options.abortSignal,
+      fetch: this.config.fetch
+    });
+    const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
+    const toolCalls = response.output.filter((output) => output.type === "function_call").map((output) => ({
+      toolCallType: "function",
+      toolCallId: output.call_id,
+      toolName: output.name,
+      args: output.arguments
+    }));
+    return {
+      text: outputTextElements.map((content) => content.text).join("\n"),
+      sources: outputTextElements.flatMap(
+        (content) => content.annotations.map((annotation) => {
+          var _a2, _b2, _c2;
+          return {
+            sourceType: "url",
+            id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : (0, import_provider_utils8.generateId)(),
+            url: annotation.url,
+            title: annotation.title
+          };
+        })
+      ),
+      finishReason: mapOpenAIResponseFinishReason({
+        finishReason: (_a = response.incomplete_details) == null ? void 0 : _a.reason,
+        hasToolCalls: toolCalls.length > 0
+      }),
+      toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
+      usage: {
+        promptTokens: response.usage.input_tokens,
+        completionTokens: response.usage.output_tokens
+      },
+      rawCall: {
+        rawPrompt: void 0,
+        rawSettings: {}
+      },
+      rawResponse: {
+        headers: responseHeaders,
+        body: rawResponse
+      },
+      request: {
+        body: JSON.stringify(body)
+      },
+      response: {
+        id: response.id,
+        timestamp: new Date(response.created_at * 1e3),
+        modelId: response.model
+      },
+      providerMetadata: {
+        openai: {
+          responseId: response.id,
+          cachedPromptTokens: (_c = (_b = response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : null,
+          reasoningTokens: (_e = (_d = response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : null
+        }
+      },
+      warnings
+    };
+  }
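Sketch of the token accounting `doGenerate` returns, mapped back to the usage fields validated by `usageSchema` below:

const { usage, providerMetadata } = await model.doGenerate(callOptions);
usage.promptTokens;                           // <- usage.input_tokens
usage.completionTokens;                       // <- usage.output_tokens
providerMetadata?.openai.cachedPromptTokens;  // <- input_tokens_details.cached_tokens (null if absent)
providerMetadata?.openai.reasoningTokens;     // <- output_tokens_details.reasoning_tokens (null if absent)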
+  async doStream(options) {
+    const { args: body, warnings } = this.getArgs(options);
+    const { responseHeaders, value: response } = await (0, import_provider_utils8.postJsonToApi)({
+      url: this.config.url({
+        path: "/responses",
+        modelId: this.modelId
+      }),
+      headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
+      body: {
+        ...body,
+        stream: true
+      },
+      failedResponseHandler: openaiFailedResponseHandler,
+      successfulResponseHandler: (0, import_provider_utils8.createEventSourceResponseHandler)(
+        openaiResponsesChunkSchema
+      ),
+      abortSignal: options.abortSignal,
+      fetch: this.config.fetch
+    });
+    const self = this;
+    let finishReason = "unknown";
+    let promptTokens = NaN;
+    let completionTokens = NaN;
+    let cachedPromptTokens = null;
+    let reasoningTokens = null;
+    let responseId = null;
+    const ongoingToolCalls = {};
+    let hasToolCalls = false;
+    return {
+      stream: response.pipeThrough(
+        new TransformStream({
+          transform(chunk, controller) {
+            var _a, _b, _c, _d, _e, _f, _g, _h;
+            if (!chunk.success) {
+              finishReason = "error";
+              controller.enqueue({ type: "error", error: chunk.error });
+              return;
+            }
+            const value = chunk.value;
+            if (isResponseOutputItemAddedChunk(value)) {
+              if (value.item.type === "function_call") {
+                ongoingToolCalls[value.output_index] = {
+                  toolName: value.item.name,
+                  toolCallId: value.item.call_id
+                };
+                controller.enqueue({
+                  type: "tool-call-delta",
+                  toolCallType: "function",
+                  toolCallId: value.item.call_id,
+                  toolName: value.item.name,
+                  argsTextDelta: value.item.arguments
+                });
+              }
+            } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
+              const toolCall = ongoingToolCalls[value.output_index];
+              if (toolCall != null) {
+                controller.enqueue({
+                  type: "tool-call-delta",
+                  toolCallType: "function",
+                  toolCallId: toolCall.toolCallId,
+                  toolName: toolCall.toolName,
+                  argsTextDelta: value.delta
+                });
+              }
+            } else if (isResponseCreatedChunk(value)) {
+              responseId = value.response.id;
+              controller.enqueue({
+                type: "response-metadata",
+                id: value.response.id,
+                timestamp: new Date(value.response.created_at * 1e3),
+                modelId: value.response.model
+              });
+            } else if (isTextDeltaChunk(value)) {
+              controller.enqueue({
+                type: "text-delta",
+                textDelta: value.delta
+              });
+            } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
+              ongoingToolCalls[value.output_index] = void 0;
+              hasToolCalls = true;
+              controller.enqueue({
+                type: "tool-call",
+                toolCallType: "function",
+                toolCallId: value.item.call_id,
+                toolName: value.item.name,
+                args: value.item.arguments
+              });
+            } else if (isResponseFinishedChunk(value)) {
+              finishReason = mapOpenAIResponseFinishReason({
+                finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
+                hasToolCalls
+              });
+              promptTokens = value.response.usage.input_tokens;
+              completionTokens = value.response.usage.output_tokens;
+              cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
+              reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
+            } else if (isResponseAnnotationAddedChunk(value)) {
+              controller.enqueue({
+                type: "source",
+                source: {
+                  sourceType: "url",
+                  id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils8.generateId)(),
+                  url: value.annotation.url,
+                  title: value.annotation.title
+                }
+              });
+            }
+          },
+          flush(controller) {
+            controller.enqueue({
+              type: "finish",
+              finishReason,
+              usage: { promptTokens, completionTokens },
+              ...(cachedPromptTokens != null || reasoningTokens != null) && {
+                providerMetadata: {
+                  openai: {
+                    responseId,
+                    cachedPromptTokens,
+                    reasoningTokens
+                  }
+                }
+              }
+            });
+          }
+        })
+      ),
+      rawCall: {
+        rawPrompt: void 0,
+        rawSettings: {}
+      },
+      rawResponse: { headers: responseHeaders },
+      request: { body: JSON.stringify(body) },
+      warnings
+    };
+  }
+};
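A sketch of draining the resulting v1 stream; for a streamed tool call the part sequence is a response-metadata part (from response.created), repeated tool-call-delta parts, a final tool-call part (from output_item.done), and a finish part:

const { stream } = await model.doStream(callOptions);
const reader = stream.getReader();
for (;;) {
  const { done, value: part } = await reader.read();
  if (done) break;
  if (part.type === "text-delta") process.stdout.write(part.textDelta);
  if (part.type === "tool-call") console.log("tool call:", part.toolName, part.args);
  if (part.type === "finish") console.log("\nfinish:", part.finishReason, part.usage);
}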
+var usageSchema = import_zod6.z.object({
+  input_tokens: import_zod6.z.number(),
+  input_tokens_details: import_zod6.z.object({ cached_tokens: import_zod6.z.number().nullish() }).nullish(),
+  output_tokens: import_zod6.z.number(),
+  output_tokens_details: import_zod6.z.object({ reasoning_tokens: import_zod6.z.number().nullish() }).nullish()
+});
+var textDeltaChunkSchema = import_zod6.z.object({
+  type: import_zod6.z.literal("response.output_text.delta"),
+  delta: import_zod6.z.string()
+});
+var responseFinishedChunkSchema = import_zod6.z.object({
+  type: import_zod6.z.enum(["response.completed", "response.incomplete"]),
+  response: import_zod6.z.object({
+    incomplete_details: import_zod6.z.object({ reason: import_zod6.z.string() }).nullish(),
+    usage: usageSchema
+  })
+});
+var responseCreatedChunkSchema = import_zod6.z.object({
+  type: import_zod6.z.literal("response.created"),
+  response: import_zod6.z.object({
+    id: import_zod6.z.string(),
+    created_at: import_zod6.z.number(),
+    model: import_zod6.z.string()
+  })
+});
+var responseOutputItemDoneSchema = import_zod6.z.object({
+  type: import_zod6.z.literal("response.output_item.done"),
+  output_index: import_zod6.z.number(),
+  item: import_zod6.z.discriminatedUnion("type", [
+    import_zod6.z.object({
+      type: import_zod6.z.literal("message")
+    }),
+    import_zod6.z.object({
+      type: import_zod6.z.literal("function_call"),
+      id: import_zod6.z.string(),
+      call_id: import_zod6.z.string(),
+      name: import_zod6.z.string(),
+      arguments: import_zod6.z.string(),
+      status: import_zod6.z.literal("completed")
+    })
+  ])
+});
+var responseFunctionCallArgumentsDeltaSchema = import_zod6.z.object({
+  type: import_zod6.z.literal("response.function_call_arguments.delta"),
+  item_id: import_zod6.z.string(),
+  output_index: import_zod6.z.number(),
+  delta: import_zod6.z.string()
+});
+var responseOutputItemAddedSchema = import_zod6.z.object({
+  type: import_zod6.z.literal("response.output_item.added"),
+  output_index: import_zod6.z.number(),
+  item: import_zod6.z.discriminatedUnion("type", [
+    import_zod6.z.object({
+      type: import_zod6.z.literal("message")
+    }),
+    import_zod6.z.object({
+      type: import_zod6.z.literal("function_call"),
+      id: import_zod6.z.string(),
+      call_id: import_zod6.z.string(),
+      name: import_zod6.z.string(),
+      arguments: import_zod6.z.string()
+    })
+  ])
+});
+var responseAnnotationAddedSchema = import_zod6.z.object({
+  type: import_zod6.z.literal("response.output_text.annotation.added"),
+  annotation: import_zod6.z.object({
+    type: import_zod6.z.literal("url_citation"),
+    url: import_zod6.z.string(),
+    title: import_zod6.z.string()
+  })
+});
+var openaiResponsesChunkSchema = import_zod6.z.union([
+  textDeltaChunkSchema,
+  responseFinishedChunkSchema,
+  responseCreatedChunkSchema,
+  responseOutputItemDoneSchema,
+  responseFunctionCallArgumentsDeltaSchema,
+  responseOutputItemAddedSchema,
+  responseAnnotationAddedSchema,
+  import_zod6.z.object({ type: import_zod6.z.string() }).passthrough()
+  // fallback for unknown chunks
+]);
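Representative server-sent events this union accepts, one per schema (payloads abbreviated and hypothetical); anything with an unrecognized `type` still parses through the passthrough fallback instead of failing the stream:

// data: {"type":"response.created","response":{"id":"resp_1","created_at":1741300000,"model":"gpt-4o-mini"}}
// data: {"type":"response.output_text.delta","delta":"Hel"}
// data: {"type":"response.output_item.added","output_index":0,
//        "item":{"type":"function_call","id":"fc_1","call_id":"call_1","name":"weather","arguments":""}}
// data: {"type":"response.function_call_arguments.delta","item_id":"fc_1","output_index":0,"delta":"{\"city\":"}
// data: {"type":"response.completed","response":{"incomplete_details":null,"usage":{"input_tokens":12,"output_tokens":7}}}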
+function isTextDeltaChunk(chunk) {
+  return chunk.type === "response.output_text.delta";
+}
+function isResponseOutputItemDoneChunk(chunk) {
+  return chunk.type === "response.output_item.done";
+}
+function isResponseFinishedChunk(chunk) {
+  return chunk.type === "response.completed" || chunk.type === "response.incomplete";
+}
+function isResponseCreatedChunk(chunk) {
+  return chunk.type === "response.created";
+}
+function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
+  return chunk.type === "response.function_call_arguments.delta";
+}
+function isResponseOutputItemAddedChunk(chunk) {
+  return chunk.type === "response.output_item.added";
+}
+function isResponseAnnotationAddedChunk(chunk) {
+  return chunk.type === "response.output_text.annotation.added";
+}
+function getResponsesModelConfig(modelId) {
+  if (modelId.startsWith("o")) {
+    if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
+      return {
+        isReasoningModel: true,
+        systemMessageMode: "remove",
+        requiredAutoTruncation: false
+      };
+    }
+    return {
+      isReasoningModel: true,
+      systemMessageMode: "developer",
+      requiredAutoTruncation: false
+    };
+  }
+  return {
+    isReasoningModel: false,
+    systemMessageMode: "system",
+    requiredAutoTruncation: false
+  };
+}
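Spelled out, the per-model configuration this selects (any model id starting with "o" is treated as a reasoning model):

getResponsesModelConfig("o1-mini"); // { isReasoningModel: true,  systemMessageMode: "remove",    requiredAutoTruncation: false }
getResponsesModelConfig("o3");      // { isReasoningModel: true,  systemMessageMode: "developer", requiredAutoTruncation: false }
getResponsesModelConfig("gpt-4o");  // { isReasoningModel: false, systemMessageMode: "system",    requiredAutoTruncation: false }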
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
   OpenAIChatLanguageModel,
   OpenAICompletionLanguageModel,
   OpenAIEmbeddingModel,
   OpenAIImageModel,
+  OpenAIResponsesLanguageModel,
   modelMaxImagesPerCall
 });
 //# sourceMappingURL=index.js.map