@ai-sdk/openai 1.2.1 → 1.2.2

This diff shows the changes between the publicly released contents of @ai-sdk/openai 1.2.1 and 1.2.2, as published to the public package registry. It is provided for informational purposes only.
@@ -24,6 +24,7 @@ function convertToOpenAIChatMessages({
   systemMessageMode = "system"
 }) {
   const messages = [];
+  const warnings = [];
   for (const { role, content } of prompt) {
     switch (role) {
       case "system": {
@@ -37,6 +38,10 @@ function convertToOpenAIChatMessages({
             break;
           }
           case "remove": {
+            warnings.push({
+              type: "other",
+              message: "system messages are removed for this model"
+            });
             break;
           }
           default: {
@@ -177,7 +182,7 @@ function convertToOpenAIChatMessages({
       }
     }
   }
-  return messages;
+  return { messages, warnings };
 }
 
 // src/map-openai-chat-logprobs.ts
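
Taken together, the three hunks above move the "system messages are removed" warning into convertToOpenAIChatMessages itself: the converter now collects warnings while it walks the prompt and returns { messages, warnings } instead of a bare message array. A minimal sketch of the new calling convention (the prompt values are our own example, not from the diff):

    // Sketch only; names match the diff, inputs are hypothetical.
    const { messages, warnings } = convertToOpenAIChatMessages({
      prompt: [
        { role: "system", content: "Be terse." },
        { role: "user", content: [{ type: "text", text: "Hi" }] },
      ],
      useLegacyFunctionCalling: false,
      systemMessageMode: "remove", // triggers the new warning path
    });
    // warnings: [{ type: "other",
    //              message: "system messages are removed for this model" }]
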
@@ -410,12 +415,14 @@ var OpenAIChatLanguageModel = class {
         functionality: "structuredOutputs with useLegacyFunctionCalling"
       });
     }
-    if (getSystemMessageMode(this.modelId) === "remove" && prompt.some((message) => message.role === "system")) {
-      warnings.push({
-        type: "other",
-        message: "system messages are removed for this model"
-      });
-    }
+    const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
+      {
+        prompt,
+        useLegacyFunctionCalling,
+        systemMessageMode: getSystemMessageMode(this.modelId)
+      }
+    );
+    warnings.push(...messageWarnings);
     const baseArgs = {
       // model id:
       model: this.modelId,
@@ -450,11 +457,7 @@ var OpenAIChatLanguageModel = class {
       prediction: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.prediction,
       reasoning_effort: (_g = (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
       // messages:
-      messages: convertToOpenAIChatMessages({
-        prompt,
-        useLegacyFunctionCalling,
-        systemMessageMode: getSystemMessageMode(this.modelId)
-      })
+      messages
     };
     if (isReasoningModel(this.modelId)) {
       if (baseArgs.temperature != null) {
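
On the caller side, the two hunks above make OpenAIChatLanguageModel invoke the converter once, keep the returned messages for baseArgs, and splice the converter's warnings into its own list; the old inline getSystemMessageMode(...) === "remove" check is deleted, so the warning is now emitted exactly where the message is dropped. If you reach this model through the AI SDK's generateText (a consumer API outside this diff), such warnings surface on the result; a hedged usage sketch:

    import { generateText } from "ai"; // assumed consumer package, not part of this diff
    import { openai } from "@ai-sdk/openai";

    const result = await generateText({
      model: openai("o1-mini"), // assuming its chat systemMessageMode is "remove" (table not shown in this diff)
      system: "Be terse.",
      prompt: "Hi",
    });
    // Expectation under this change: result.warnings includes
    // { type: "other", message: "system messages are removed for this model" }
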
@@ -1600,11 +1603,773 @@ var OpenAIImageModel = class {
 var openaiImageResponseSchema = z5.object({
   data: z5.array(z5.object({ b64_json: z5.string() }))
 });
+
+// src/responses/openai-responses-language-model.ts
+import {
+  combineHeaders as combineHeaders5,
+  createEventSourceResponseHandler as createEventSourceResponseHandler3,
+  createJsonResponseHandler as createJsonResponseHandler5,
+  generateId as generateId2,
+  postJsonToApi as postJsonToApi5
+} from "@ai-sdk/provider-utils";
+import { z as z6 } from "zod";
+
+// src/responses/convert-to-openai-responses-messages.ts
+import {
+  UnsupportedFunctionalityError as UnsupportedFunctionalityError6
+} from "@ai-sdk/provider";
+import { convertUint8ArrayToBase64 as convertUint8ArrayToBase642 } from "@ai-sdk/provider-utils";
+function convertToOpenAIResponsesMessages({
+  prompt,
+  systemMessageMode
+}) {
+  const messages = [];
+  const warnings = [];
+  for (const { role, content } of prompt) {
+    switch (role) {
+      case "system": {
+        switch (systemMessageMode) {
+          case "system": {
+            messages.push({ role: "system", content });
+            break;
+          }
+          case "developer": {
+            messages.push({ role: "developer", content });
+            break;
+          }
+          case "remove": {
+            warnings.push({
+              type: "other",
+              message: "system messages are removed for this model"
+            });
+            break;
+          }
+          default: {
+            const _exhaustiveCheck = systemMessageMode;
+            throw new Error(
+              `Unsupported system message mode: ${_exhaustiveCheck}`
+            );
+          }
+        }
+        break;
+      }
+      case "user": {
+        messages.push({
+          role: "user",
+          content: content.map((part) => {
+            var _a, _b, _c;
+            switch (part.type) {
+              case "text": {
+                return { type: "input_text", text: part.text };
+              }
+              case "image": {
+                return {
+                  type: "input_image",
+                  image_url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase642(part.image)}`,
+                  // OpenAI specific extension: image detail
+                  detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
+                };
+              }
+              case "file": {
+                throw new UnsupportedFunctionalityError6({
+                  functionality: "Image content parts in user messages"
+                });
+              }
+            }
+          })
+        });
+        break;
+      }
+      case "assistant": {
+        for (const part of content) {
+          switch (part.type) {
+            case "text": {
+              messages.push({
+                role: "assistant",
+                content: [{ type: "output_text", text: part.text }]
+              });
+              break;
+            }
+            case "tool-call": {
+              messages.push({
+                type: "function_call",
+                call_id: part.toolCallId,
+                name: part.toolName,
+                arguments: JSON.stringify(part.args)
+              });
+              break;
+            }
+          }
+        }
+        break;
+      }
+      case "tool": {
+        for (const part of content) {
+          messages.push({
+            type: "function_call_output",
+            call_id: part.toolCallId,
+            output: JSON.stringify(part.result)
+          });
+        }
+        break;
+      }
+      default: {
+        const _exhaustiveCheck = role;
+        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+      }
+    }
+  }
+  return { messages, warnings };
+}
+
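
The new converter mirrors the chat-side one but emits the Responses API input shapes: user text becomes input_text, images become input_image, assistant text becomes output_text, and tool calls and results become function_call / function_call_output items. A sketch of the mapping (example prompt values are ours):

    const { messages, warnings } = convertToOpenAIResponsesMessages({
      prompt: [
        { role: "system", content: "Be terse." },
        { role: "user", content: [{ type: "text", text: "What is 2 + 2?" }] },
      ],
      systemMessageMode: "developer",
    });
    // messages:
    // [
    //   { role: "developer", content: "Be terse." },
    //   { role: "user", content: [{ type: "input_text", text: "What is 2 + 2?" }] }
    // ]
    // warnings: []
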
+// src/responses/map-openai-responses-finish-reason.ts
+function mapOpenAIResponseFinishReason({
+  finishReason,
+  hasToolCalls
+}) {
+  switch (finishReason) {
+    case void 0:
+    case null:
+      return hasToolCalls ? "tool-calls" : "stop";
+    case "max_output_tokens":
+      return "length";
+    case "content_filter":
+      return "content-filter";
+    default:
+      return hasToolCalls ? "tool-calls" : "unknown";
+  }
+}
+
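
The mapper translates the Responses API's incomplete_details.reason into the SDK's finish-reason vocabulary, treating a missing reason as normal completion. Concretely:

    mapOpenAIResponseFinishReason({ finishReason: undefined, hasToolCalls: false }); // "stop"
    mapOpenAIResponseFinishReason({ finishReason: undefined, hasToolCalls: true });  // "tool-calls"
    mapOpenAIResponseFinishReason({ finishReason: "max_output_tokens", hasToolCalls: false }); // "length"
    mapOpenAIResponseFinishReason({ finishReason: "content_filter", hasToolCalls: false });    // "content-filter"
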
+// src/responses/openai-responses-prepare-tools.ts
+import {
+  UnsupportedFunctionalityError as UnsupportedFunctionalityError7
+} from "@ai-sdk/provider";
+function prepareResponsesTools({
+  mode,
+  strict
+}) {
+  var _a;
+  const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+  const toolWarnings = [];
+  if (tools == null) {
+    return { tools: void 0, tool_choice: void 0, toolWarnings };
+  }
+  const toolChoice = mode.toolChoice;
+  const openaiTools = [];
+  for (const tool of tools) {
+    switch (tool.type) {
+      case "function":
+        openaiTools.push({
+          type: "function",
+          name: tool.name,
+          description: tool.description,
+          parameters: tool.parameters,
+          strict: strict ? true : void 0
+        });
+        break;
+      case "provider-defined":
+        switch (tool.id) {
+          case "openai.web_search_preview":
+            openaiTools.push({
+              type: "web_search_preview",
+              search_context_size: tool.args.searchContextSize,
+              user_location: tool.args.userLocation
+            });
+            break;
+          default:
+            toolWarnings.push({ type: "unsupported-tool", tool });
+            break;
+        }
+        break;
+      default:
+        toolWarnings.push({ type: "unsupported-tool", tool });
+        break;
+    }
+  }
+  if (toolChoice == null) {
+    return { tools: openaiTools, tool_choice: void 0, toolWarnings };
+  }
+  const type = toolChoice.type;
+  switch (type) {
+    case "auto":
+    case "none":
+    case "required":
+      return { tools: openaiTools, tool_choice: type, toolWarnings };
+    case "tool":
+      return {
+        tools: openaiTools,
+        tool_choice: {
+          type: "function",
+          name: toolChoice.toolName
+        },
+        toolWarnings
+      };
+    default: {
+      const _exhaustiveCheck = type;
+      throw new UnsupportedFunctionalityError7({
+        functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+      });
+    }
+  }
+}
+
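
prepareResponsesTools converts SDK tool definitions into the Responses payload: function tools pass through (optionally with strict: true), the provider-defined openai.web_search_preview tool is special-cased, and anything else produces an unsupported-tool warning. A sketch with a hypothetical function tool:

    const { tools, tool_choice, toolWarnings } = prepareResponsesTools({
      strict: true,
      mode: {
        type: "regular",
        toolChoice: { type: "tool", toolName: "getWeather" }, // hypothetical tool name
        tools: [{
          type: "function",
          name: "getWeather",
          description: "Look up current weather",
          parameters: { type: "object", properties: { city: { type: "string" } } },
        }],
      },
    });
    // tools       -> [{ type: "function", name: "getWeather", ..., strict: true }]
    // tool_choice -> { type: "function", name: "getWeather" }
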
+// src/responses/openai-responses-language-model.ts
+var OpenAIResponsesLanguageModel = class {
+  constructor(modelId, config) {
+    this.specificationVersion = "v1";
+    this.defaultObjectGenerationMode = "json";
+    this.modelId = modelId;
+    this.config = config;
+  }
+  get provider() {
+    return this.config.provider;
+  }
+  getArgs({
+    mode,
+    maxTokens,
+    temperature,
+    stopSequences,
+    topP,
+    topK,
+    presencePenalty,
+    frequencyPenalty,
+    seed,
+    prompt,
+    providerMetadata,
+    responseFormat
+  }) {
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
+    const warnings = [];
+    const modelConfig = getResponsesModelConfig(this.modelId);
+    const type = mode.type;
+    if (topK != null) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "topK"
+      });
+    }
+    if (seed != null) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "seed"
+      });
+    }
+    if (presencePenalty != null) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "presencePenalty"
+      });
+    }
+    if (frequencyPenalty != null) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "frequencyPenalty"
+      });
+    }
+    if (stopSequences != null) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "stopSequences"
+      });
+    }
+    const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
+      prompt,
+      systemMessageMode: modelConfig.systemMessageMode
+    });
+    warnings.push(...messageWarnings);
+    const isStrictJsonSchema = (_b = (_a = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _a.strictJsonSchema) != null ? _b : true;
+    const baseArgs = {
+      model: this.modelId,
+      input: messages,
+      temperature,
+      top_p: topP,
+      max_output_tokens: maxTokens,
+      ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
+        text: {
+          format: responseFormat.schema != null ? {
+            type: "json_schema",
+            strict: isStrictJsonSchema,
+            name: (_c = responseFormat.name) != null ? _c : "response",
+            description: responseFormat.description,
+            schema: responseFormat.schema
+          } : { type: "json_object" }
+        }
+      },
+      // provider options:
+      metadata: (_d = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _d.metadata,
+      parallel_tool_calls: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.parallelToolCalls,
+      previous_response_id: (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.previousResponseId,
+      store: (_g = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _g.store,
+      user: (_h = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _h.user,
+      // model-specific settings:
+      ...modelConfig.isReasoningModel && ((_i = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _i.reasoningEffort) != null && {
+        reasoning: { effort: (_j = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _j.reasoningEffort }
+      },
+      ...modelConfig.requiredAutoTruncation && {
+        truncation: "auto"
+      }
+    };
+    if (modelConfig.isReasoningModel) {
+      if (baseArgs.temperature != null) {
+        baseArgs.temperature = void 0;
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "temperature",
+          details: "temperature is not supported for reasoning models"
+        });
+      }
+      if (baseArgs.top_p != null) {
+        baseArgs.top_p = void 0;
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "topP",
+          details: "topP is not supported for reasoning models"
+        });
+      }
+    }
+    switch (type) {
+      case "regular": {
+        const { tools, tool_choice, toolWarnings } = prepareResponsesTools({
+          mode,
+          strict: true
+        });
+        return {
+          args: {
+            ...baseArgs,
+            tools,
+            tool_choice
+          },
+          warnings: [...warnings, ...toolWarnings]
+        };
+      }
+      case "object-json": {
+        return {
+          args: {
+            ...baseArgs,
+            text: {
+              format: mode.schema != null ? {
+                type: "json_schema",
+                strict: isStrictJsonSchema,
+                name: (_k = mode.name) != null ? _k : "response",
+                description: mode.description,
+                schema: mode.schema
+              } : { type: "json_object" }
+            }
+          },
+          warnings
+        };
+      }
+      case "object-tool": {
+        return {
+          args: {
+            ...baseArgs,
+            tool_choice: { type: "function", name: mode.tool.name },
+            tools: [
+              {
+                type: "function",
+                name: mode.tool.name,
+                description: mode.tool.description,
+                parameters: mode.tool.parameters,
+                strict: isStrictJsonSchema
+              }
+            ]
+          },
+          warnings
+        };
+      }
+      default: {
+        const _exhaustiveCheck = type;
+        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
+      }
+    }
+  }
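+  // (getArgs ends here; doGenerate/doStream follow.)

getArgs assembles the /responses request body: the converted prompt feeds input, and providerMetadata.openai supplies metadata, parallel_tool_calls, previous_response_id, store, user, and (on reasoning models) reasoning.effort, while temperature and topP are stripped from reasoning models with unsupported-setting warnings. A hedged sketch that constructs the model directly (the provider factory normally does this; config values are ours):

    const model = new OpenAIResponsesLanguageModel("o3-mini", {
      provider: "openai.responses", // assumed config values throughout
      url: ({ path }) => `https://api.openai.com/v1${path}`,
      headers: () => ({ Authorization: `Bearer ${process.env.OPENAI_API_KEY}` }),
    });
    const { args, warnings } = model.getArgs({
      mode: { type: "regular" },
      prompt: [{ role: "user", content: [{ type: "text", text: "Hi" }] }],
      temperature: 0.5, // dropped with a warning: o3-mini is a reasoning model
      providerMetadata: { openai: { reasoningEffort: "low", store: false } },
    });
    // args.reasoning -> { effort: "low" }; args.temperature -> undefined; args.store -> false
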
+  async doGenerate(options) {
+    var _a, _b, _c, _d, _e;
+    const { args: body, warnings } = this.getArgs(options);
+    const {
+      responseHeaders,
+      value: response,
+      rawValue: rawResponse
+    } = await postJsonToApi5({
+      url: this.config.url({
+        path: "/responses",
+        modelId: this.modelId
+      }),
+      headers: combineHeaders5(this.config.headers(), options.headers),
+      body,
+      failedResponseHandler: openaiFailedResponseHandler,
+      successfulResponseHandler: createJsonResponseHandler5(
+        z6.object({
+          id: z6.string(),
+          created_at: z6.number(),
+          model: z6.string(),
+          output: z6.array(
+            z6.discriminatedUnion("type", [
+              z6.object({
+                type: z6.literal("message"),
+                role: z6.literal("assistant"),
+                content: z6.array(
+                  z6.object({
+                    type: z6.literal("output_text"),
+                    text: z6.string(),
+                    annotations: z6.array(
+                      z6.object({
+                        type: z6.literal("url_citation"),
+                        start_index: z6.number(),
+                        end_index: z6.number(),
+                        url: z6.string(),
+                        title: z6.string()
+                      })
+                    )
+                  })
+                )
+              }),
+              z6.object({
+                type: z6.literal("function_call"),
+                call_id: z6.string(),
+                name: z6.string(),
+                arguments: z6.string()
+              }),
+              z6.object({
+                type: z6.literal("web_search_call")
+              }),
+              z6.object({
+                type: z6.literal("computer_call")
+              }),
+              z6.object({
+                type: z6.literal("reasoning")
+              })
+            ])
+          ),
+          incomplete_details: z6.object({ reason: z6.string() }).nullable(),
+          usage: usageSchema
+        })
+      ),
+      abortSignal: options.abortSignal,
+      fetch: this.config.fetch
+    });
+    const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
+    const toolCalls = response.output.filter((output) => output.type === "function_call").map((output) => ({
+      toolCallType: "function",
+      toolCallId: output.call_id,
+      toolName: output.name,
+      args: output.arguments
+    }));
+    return {
+      text: outputTextElements.map((content) => content.text).join("\n"),
+      sources: outputTextElements.flatMap(
+        (content) => content.annotations.map((annotation) => {
+          var _a2, _b2, _c2;
+          return {
+            sourceType: "url",
+            id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : generateId2(),
+            url: annotation.url,
+            title: annotation.title
+          };
+        })
+      ),
+      finishReason: mapOpenAIResponseFinishReason({
+        finishReason: (_a = response.incomplete_details) == null ? void 0 : _a.reason,
+        hasToolCalls: toolCalls.length > 0
+      }),
+      toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
+      usage: {
+        promptTokens: response.usage.input_tokens,
+        completionTokens: response.usage.output_tokens
+      },
+      rawCall: {
+        rawPrompt: void 0,
+        rawSettings: {}
+      },
+      rawResponse: {
+        headers: responseHeaders,
+        body: rawResponse
+      },
+      request: {
+        body: JSON.stringify(body)
+      },
+      response: {
+        id: response.id,
+        timestamp: new Date(response.created_at * 1e3),
+        modelId: response.model
+      },
+      providerMetadata: {
+        openai: {
+          responseId: response.id,
+          cachedPromptTokens: (_c = (_b = response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : null,
+          reasoningTokens: (_e = (_d = response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : null
+        }
+      },
+      warnings
+    };
+  }
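
doGenerate posts the body to /responses, validates the reply against the zod schema above, flattens message outputs into text, turns url_citation annotations into sources, surfaces function_call outputs as toolCalls, and reports cached/reasoning token counts under providerMetadata.openai. A hedged sketch exercising it against a stubbed fetch (no network; the response values are ours):

    const result = await new OpenAIResponsesLanguageModel("gpt-4o", {
      provider: "openai.responses",
      url: ({ path }) => `https://api.openai.com/v1${path}`,
      headers: () => ({}),
      fetch: async () => new Response(JSON.stringify({
        id: "resp_abc", // hypothetical values throughout
        created_at: 1710000000,
        model: "gpt-4o",
        output: [{
          type: "message",
          role: "assistant",
          content: [{ type: "output_text", text: "4", annotations: [] }],
        }],
        incomplete_details: null,
        usage: { input_tokens: 12, output_tokens: 1 },
      }), { headers: { "content-type": "application/json" } }),
    }).doGenerate({
      inputFormat: "prompt",
      mode: { type: "regular" },
      prompt: [{ role: "user", content: [{ type: "text", text: "2 + 2?" }] }],
    });
    // result.text === "4"; result.finishReason === "stop"
    // result.providerMetadata.openai.responseId === "resp_abc"
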
+  async doStream(options) {
+    const { args: body, warnings } = this.getArgs(options);
+    const { responseHeaders, value: response } = await postJsonToApi5({
+      url: this.config.url({
+        path: "/responses",
+        modelId: this.modelId
+      }),
+      headers: combineHeaders5(this.config.headers(), options.headers),
+      body: {
+        ...body,
+        stream: true
+      },
+      failedResponseHandler: openaiFailedResponseHandler,
+      successfulResponseHandler: createEventSourceResponseHandler3(
+        openaiResponsesChunkSchema
+      ),
+      abortSignal: options.abortSignal,
+      fetch: this.config.fetch
+    });
+    const self = this;
+    let finishReason = "unknown";
+    let promptTokens = NaN;
+    let completionTokens = NaN;
+    let cachedPromptTokens = null;
+    let reasoningTokens = null;
+    let responseId = null;
+    const ongoingToolCalls = {};
+    let hasToolCalls = false;
+    return {
+      stream: response.pipeThrough(
+        new TransformStream({
+          transform(chunk, controller) {
+            var _a, _b, _c, _d, _e, _f, _g, _h;
+            if (!chunk.success) {
+              finishReason = "error";
+              controller.enqueue({ type: "error", error: chunk.error });
+              return;
+            }
+            const value = chunk.value;
+            if (isResponseOutputItemAddedChunk(value)) {
+              if (value.item.type === "function_call") {
+                ongoingToolCalls[value.output_index] = {
+                  toolName: value.item.name,
+                  toolCallId: value.item.call_id
+                };
+                controller.enqueue({
+                  type: "tool-call-delta",
+                  toolCallType: "function",
+                  toolCallId: value.item.call_id,
+                  toolName: value.item.name,
+                  argsTextDelta: value.item.arguments
+                });
+              }
+            } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
+              const toolCall = ongoingToolCalls[value.output_index];
+              if (toolCall != null) {
+                controller.enqueue({
+                  type: "tool-call-delta",
+                  toolCallType: "function",
+                  toolCallId: toolCall.toolCallId,
+                  toolName: toolCall.toolName,
+                  argsTextDelta: value.delta
+                });
+              }
+            } else if (isResponseCreatedChunk(value)) {
+              responseId = value.response.id;
+              controller.enqueue({
+                type: "response-metadata",
+                id: value.response.id,
+                timestamp: new Date(value.response.created_at * 1e3),
+                modelId: value.response.model
+              });
+            } else if (isTextDeltaChunk(value)) {
+              controller.enqueue({
+                type: "text-delta",
+                textDelta: value.delta
+              });
+            } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
+              ongoingToolCalls[value.output_index] = void 0;
+              hasToolCalls = true;
+              controller.enqueue({
+                type: "tool-call",
+                toolCallType: "function",
+                toolCallId: value.item.call_id,
+                toolName: value.item.name,
+                args: value.item.arguments
+              });
+            } else if (isResponseFinishedChunk(value)) {
+              finishReason = mapOpenAIResponseFinishReason({
+                finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
+                hasToolCalls
+              });
+              promptTokens = value.response.usage.input_tokens;
+              completionTokens = value.response.usage.output_tokens;
+              cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
+              reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
+            } else if (isResponseAnnotationAddedChunk(value)) {
+              controller.enqueue({
+                type: "source",
+                source: {
+                  sourceType: "url",
+                  id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : generateId2(),
+                  url: value.annotation.url,
+                  title: value.annotation.title
+                }
+              });
+            }
+          },
+          flush(controller) {
+            controller.enqueue({
+              type: "finish",
+              finishReason,
+              usage: { promptTokens, completionTokens },
+              ...(cachedPromptTokens != null || reasoningTokens != null) && {
+                providerMetadata: {
+                  openai: {
+                    responseId,
+                    cachedPromptTokens,
+                    reasoningTokens
+                  }
+                }
+              }
+            });
+          }
+        })
+      ),
+      rawCall: {
+        rawPrompt: void 0,
+        rawSettings: {}
+      },
+      rawResponse: { headers: responseHeaders },
+      request: { body: JSON.stringify(body) },
+      warnings
+    };
+  }
+};
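
doStream re-sends the same body with stream: true and pipes the SSE chunks through a TransformStream that re-emits SDK stream parts: response-metadata on response.created, text-delta for output text, tool-call-delta/tool-call pairs for function calls, source for URL citations, and a final finish part carrying usage plus cached/reasoning token metadata. A hedged sketch of draining that stream (reusing a model instance constructed as in the earlier sketches):

    const { stream } = await model.doStream({
      inputFormat: "prompt",
      mode: { type: "regular" },
      prompt: [{ role: "user", content: [{ type: "text", text: "Hi" }] }],
    });
    const reader = stream.getReader();
    for (;;) {
      const { done, value } = await reader.read();
      if (done) break;
      if (value.type === "text-delta") process.stdout.write(value.textDelta);
      if (value.type === "finish") console.log("\nusage:", value.usage);
    }
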
+var usageSchema = z6.object({
+  input_tokens: z6.number(),
+  input_tokens_details: z6.object({ cached_tokens: z6.number().nullish() }).nullish(),
+  output_tokens: z6.number(),
+  output_tokens_details: z6.object({ reasoning_tokens: z6.number().nullish() }).nullish()
+});
+var textDeltaChunkSchema = z6.object({
+  type: z6.literal("response.output_text.delta"),
+  delta: z6.string()
+});
+var responseFinishedChunkSchema = z6.object({
+  type: z6.enum(["response.completed", "response.incomplete"]),
+  response: z6.object({
+    incomplete_details: z6.object({ reason: z6.string() }).nullish(),
+    usage: usageSchema
+  })
+});
+var responseCreatedChunkSchema = z6.object({
+  type: z6.literal("response.created"),
+  response: z6.object({
+    id: z6.string(),
+    created_at: z6.number(),
+    model: z6.string()
+  })
+});
+var responseOutputItemDoneSchema = z6.object({
+  type: z6.literal("response.output_item.done"),
+  output_index: z6.number(),
+  item: z6.discriminatedUnion("type", [
+    z6.object({
+      type: z6.literal("message")
+    }),
+    z6.object({
+      type: z6.literal("function_call"),
+      id: z6.string(),
+      call_id: z6.string(),
+      name: z6.string(),
+      arguments: z6.string(),
+      status: z6.literal("completed")
+    })
+  ])
+});
+var responseFunctionCallArgumentsDeltaSchema = z6.object({
+  type: z6.literal("response.function_call_arguments.delta"),
+  item_id: z6.string(),
+  output_index: z6.number(),
+  delta: z6.string()
+});
+var responseOutputItemAddedSchema = z6.object({
+  type: z6.literal("response.output_item.added"),
+  output_index: z6.number(),
+  item: z6.discriminatedUnion("type", [
+    z6.object({
+      type: z6.literal("message")
+    }),
+    z6.object({
+      type: z6.literal("function_call"),
+      id: z6.string(),
+      call_id: z6.string(),
+      name: z6.string(),
+      arguments: z6.string()
+    })
+  ])
+});
+var responseAnnotationAddedSchema = z6.object({
+  type: z6.literal("response.output_text.annotation.added"),
+  annotation: z6.object({
+    type: z6.literal("url_citation"),
+    url: z6.string(),
+    title: z6.string()
+  })
+});
+var openaiResponsesChunkSchema = z6.union([
+  textDeltaChunkSchema,
+  responseFinishedChunkSchema,
+  responseCreatedChunkSchema,
+  responseOutputItemDoneSchema,
+  responseFunctionCallArgumentsDeltaSchema,
+  responseOutputItemAddedSchema,
+  responseAnnotationAddedSchema,
+  z6.object({ type: z6.string() }).passthrough()
+  // fallback for unknown chunks
+]);
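
Note the last union member: unknown event types parse through the z6.object({ type: z6.string() }).passthrough() fallback instead of failing validation, and the stream transform above simply ignores them. For instance:

    // Known chunk: matches textDeltaChunkSchema.
    openaiResponsesChunkSchema.parse({ type: "response.output_text.delta", delta: "Hi" });
    // Unknown chunk: accepted by the passthrough fallback rather than throwing.
    openaiResponsesChunkSchema.parse({ type: "response.some_future_event", data: 1 });
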
+function isTextDeltaChunk(chunk) {
+  return chunk.type === "response.output_text.delta";
+}
+function isResponseOutputItemDoneChunk(chunk) {
+  return chunk.type === "response.output_item.done";
+}
+function isResponseFinishedChunk(chunk) {
+  return chunk.type === "response.completed" || chunk.type === "response.incomplete";
+}
+function isResponseCreatedChunk(chunk) {
+  return chunk.type === "response.created";
+}
+function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
+  return chunk.type === "response.function_call_arguments.delta";
+}
+function isResponseOutputItemAddedChunk(chunk) {
+  return chunk.type === "response.output_item.added";
+}
+function isResponseAnnotationAddedChunk(chunk) {
+  return chunk.type === "response.output_text.annotation.added";
+}
+function getResponsesModelConfig(modelId) {
+  if (modelId.startsWith("o")) {
+    if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
+      return {
+        isReasoningModel: true,
+        systemMessageMode: "remove",
+        requiredAutoTruncation: false
+      };
+    }
+    return {
+      isReasoningModel: true,
+      systemMessageMode: "developer",
+      requiredAutoTruncation: false
+    };
+  }
+  return {
+    isReasoningModel: false,
+    systemMessageMode: "system",
+    requiredAutoTruncation: false
+  };
+}
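
getResponsesModelConfig routes system-message handling by model family: o1-mini and o1-preview drop system messages, other o-series models use the developer role, and everything else keeps role system. Concretely:

    getResponsesModelConfig("o1-mini"); // { isReasoningModel: true,  systemMessageMode: "remove",    requiredAutoTruncation: false }
    getResponsesModelConfig("o3-mini"); // { isReasoningModel: true,  systemMessageMode: "developer", requiredAutoTruncation: false }
    getResponsesModelConfig("gpt-4o");  // { isReasoningModel: false, systemMessageMode: "system",    requiredAutoTruncation: false }
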
 export {
   OpenAIChatLanguageModel,
   OpenAICompletionLanguageModel,
   OpenAIEmbeddingModel,
   OpenAIImageModel,
+  OpenAIResponsesLanguageModel,
   modelMaxImagesPerCall
 };
 //# sourceMappingURL=index.mjs.map
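
In sum, 1.2.2 makes chat-message conversion report its own warnings and adds the OpenAIResponsesLanguageModel exported above, targeting OpenAI's /responses endpoint. The provider factory wiring is outside this diff; assuming the package exposes the model as openai.responses(...) (as later releases document), end-to-end usage would look roughly like:

    import { openai } from "@ai-sdk/openai";
    import { generateText } from "ai"; // assumed consumer package

    const { text } = await generateText({
      model: openai.responses("gpt-4o-mini"), // assumed entry point, not shown in this diff
      prompt: "Write a haiku about diffs.",
    });

URL citations, when present in the response (for example from the web_search_preview tool), surface on the result as sources built from url_citation annotations.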