@ai-sdk/openai 1.2.0 → 1.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +15 -0
- package/dist/index.d.mts +31 -0
- package/dist/index.d.ts +31 -0
- package/dist/index.js +817 -24
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +824 -21
- package/dist/index.mjs.map +1 -1
- package/internal/dist/index.d.mts +16 -1
- package/internal/dist/index.d.ts +16 -1
- package/internal/dist/index.js +780 -16
- package/internal/dist/index.js.map +1 -1
- package/internal/dist/index.mjs +789 -16
- package/internal/dist/index.mjs.map +1 -1
- package/package.json +5 -4
package/dist/index.mjs
CHANGED
|
@@ -30,6 +30,7 @@ function convertToOpenAIChatMessages({
|
|
|
30
30
|
systemMessageMode = "system"
|
|
31
31
|
}) {
|
|
32
32
|
const messages = [];
|
|
33
|
+
const warnings = [];
|
|
33
34
|
for (const { role, content } of prompt) {
|
|
34
35
|
switch (role) {
|
|
35
36
|
case "system": {
|
|
@@ -43,6 +44,10 @@ function convertToOpenAIChatMessages({
|
|
|
43
44
|
break;
|
|
44
45
|
}
|
|
45
46
|
case "remove": {
|
|
47
|
+
warnings.push({
|
|
48
|
+
type: "other",
|
|
49
|
+
message: "system messages are removed for this model"
|
|
50
|
+
});
|
|
46
51
|
break;
|
|
47
52
|
}
|
|
48
53
|
default: {
|
|
@@ -183,7 +188,7 @@ function convertToOpenAIChatMessages({
|
|
|
183
188
|
}
|
|
184
189
|
}
|
|
185
190
|
}
|
|
186
|
-
return messages;
|
|
191
|
+
return { messages, warnings };
|
|
187
192
|
}
|
|
188
193
|
|
|
189
194
|
// src/map-openai-chat-logprobs.ts
|
|
@@ -306,12 +311,12 @@ function prepareTools({
|
|
|
306
311
|
};
|
|
307
312
|
}
|
|
308
313
|
}
|
|
309
|
-
const
|
|
314
|
+
const openaiTools2 = [];
|
|
310
315
|
for (const tool of tools) {
|
|
311
316
|
if (tool.type === "provider-defined") {
|
|
312
317
|
toolWarnings.push({ type: "unsupported-tool", tool });
|
|
313
318
|
} else {
|
|
314
|
-
|
|
319
|
+
openaiTools2.push({
|
|
315
320
|
type: "function",
|
|
316
321
|
function: {
|
|
317
322
|
name: tool.name,
|
|
@@ -323,17 +328,17 @@ function prepareTools({
|
|
|
323
328
|
}
|
|
324
329
|
}
|
|
325
330
|
if (toolChoice == null) {
|
|
326
|
-
return { tools:
|
|
331
|
+
return { tools: openaiTools2, tool_choice: void 0, toolWarnings };
|
|
327
332
|
}
|
|
328
333
|
const type = toolChoice.type;
|
|
329
334
|
switch (type) {
|
|
330
335
|
case "auto":
|
|
331
336
|
case "none":
|
|
332
337
|
case "required":
|
|
333
|
-
return { tools:
|
|
338
|
+
return { tools: openaiTools2, tool_choice: type, toolWarnings };
|
|
334
339
|
case "tool":
|
|
335
340
|
return {
|
|
336
|
-
tools:
|
|
341
|
+
tools: openaiTools2,
|
|
337
342
|
tool_choice: {
|
|
338
343
|
type: "function",
|
|
339
344
|
function: {
|
|
@@ -416,12 +421,14 @@ var OpenAIChatLanguageModel = class {
|
|
|
416
421
|
functionality: "structuredOutputs with useLegacyFunctionCalling"
|
|
417
422
|
});
|
|
418
423
|
}
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
|
|
423
|
-
|
|
424
|
-
|
|
424
|
+
const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
|
|
425
|
+
{
|
|
426
|
+
prompt,
|
|
427
|
+
useLegacyFunctionCalling,
|
|
428
|
+
systemMessageMode: getSystemMessageMode(this.modelId)
|
|
429
|
+
}
|
|
430
|
+
);
|
|
431
|
+
warnings.push(...messageWarnings);
|
|
425
432
|
const baseArgs = {
|
|
426
433
|
// model id:
|
|
427
434
|
model: this.modelId,
|
|
@@ -456,11 +463,7 @@ var OpenAIChatLanguageModel = class {
|
|
|
456
463
|
prediction: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.prediction,
|
|
457
464
|
reasoning_effort: (_g = (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
|
|
458
465
|
// messages:
|
|
459
|
-
messages
|
|
460
|
-
prompt,
|
|
461
|
-
useLegacyFunctionCalling,
|
|
462
|
-
systemMessageMode: getSystemMessageMode(this.modelId)
|
|
463
|
-
})
|
|
466
|
+
messages
|
|
464
467
|
};
|
|
465
468
|
if (isReasoningModel(this.modelId)) {
|
|
466
469
|
if (baseArgs.temperature != null) {
|
|
@@ -602,7 +605,11 @@ var OpenAIChatLanguageModel = class {
|
|
|
602
605
|
async doGenerate(options) {
|
|
603
606
|
var _a, _b, _c, _d, _e, _f, _g, _h;
|
|
604
607
|
const { args: body, warnings } = this.getArgs(options);
|
|
605
|
-
const {
|
|
608
|
+
const {
|
|
609
|
+
responseHeaders,
|
|
610
|
+
value: response,
|
|
611
|
+
rawValue: rawResponse
|
|
612
|
+
} = await postJsonToApi({
|
|
606
613
|
url: this.config.url({
|
|
607
614
|
path: "/chat/completions",
|
|
608
615
|
modelId: this.modelId
|
|
@@ -657,7 +664,7 @@ var OpenAIChatLanguageModel = class {
|
|
|
657
664
|
completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
|
|
658
665
|
},
|
|
659
666
|
rawCall: { rawPrompt, rawSettings },
|
|
660
|
-
rawResponse: { headers: responseHeaders },
|
|
667
|
+
rawResponse: { headers: responseHeaders, body: rawResponse },
|
|
661
668
|
request: { body: JSON.stringify(body) },
|
|
662
669
|
response: getResponseMetadata(response),
|
|
663
670
|
warnings,
|
|
@@ -1269,7 +1276,11 @@ var OpenAICompletionLanguageModel = class {
|
|
|
1269
1276
|
}
|
|
1270
1277
|
async doGenerate(options) {
|
|
1271
1278
|
const { args, warnings } = this.getArgs(options);
|
|
1272
|
-
const {
|
|
1279
|
+
const {
|
|
1280
|
+
responseHeaders,
|
|
1281
|
+
value: response,
|
|
1282
|
+
rawValue: rawResponse
|
|
1283
|
+
} = await postJsonToApi2({
|
|
1273
1284
|
url: this.config.url({
|
|
1274
1285
|
path: "/completions",
|
|
1275
1286
|
modelId: this.modelId
|
|
@@ -1294,7 +1305,7 @@ var OpenAICompletionLanguageModel = class {
|
|
|
1294
1305
|
finishReason: mapOpenAIFinishReason(choice.finish_reason),
|
|
1295
1306
|
logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
|
|
1296
1307
|
rawCall: { rawPrompt, rawSettings },
|
|
1297
|
-
rawResponse: { headers: responseHeaders },
|
|
1308
|
+
rawResponse: { headers: responseHeaders, body: rawResponse },
|
|
1298
1309
|
response: getResponseMetadata(response),
|
|
1299
1310
|
warnings,
|
|
1300
1311
|
request: { body: JSON.stringify(args) }
|
|
@@ -1599,6 +1610,788 @@ var openaiImageResponseSchema = z5.object({
|
|
|
1599
1610
|
data: z5.array(z5.object({ b64_json: z5.string() }))
|
|
1600
1611
|
});
|
|
1601
1612
|
|
|
1613
|
+
// src/responses/openai-responses-language-model.ts
|
|
1614
|
+
import {
|
|
1615
|
+
combineHeaders as combineHeaders5,
|
|
1616
|
+
createEventSourceResponseHandler as createEventSourceResponseHandler3,
|
|
1617
|
+
createJsonResponseHandler as createJsonResponseHandler5,
|
|
1618
|
+
generateId as generateId2,
|
|
1619
|
+
postJsonToApi as postJsonToApi5
|
|
1620
|
+
} from "@ai-sdk/provider-utils";
|
|
1621
|
+
import { z as z6 } from "zod";
|
|
1622
|
+
|
|
1623
|
+
// src/responses/convert-to-openai-responses-messages.ts
|
|
1624
|
+
import {
|
|
1625
|
+
UnsupportedFunctionalityError as UnsupportedFunctionalityError6
|
|
1626
|
+
} from "@ai-sdk/provider";
|
|
1627
|
+
import { convertUint8ArrayToBase64 as convertUint8ArrayToBase642 } from "@ai-sdk/provider-utils";
|
|
1628
|
+
function convertToOpenAIResponsesMessages({
|
|
1629
|
+
prompt,
|
|
1630
|
+
systemMessageMode
|
|
1631
|
+
}) {
|
|
1632
|
+
const messages = [];
|
|
1633
|
+
const warnings = [];
|
|
1634
|
+
for (const { role, content } of prompt) {
|
|
1635
|
+
switch (role) {
|
|
1636
|
+
case "system": {
|
|
1637
|
+
switch (systemMessageMode) {
|
|
1638
|
+
case "system": {
|
|
1639
|
+
messages.push({ role: "system", content });
|
|
1640
|
+
break;
|
|
1641
|
+
}
|
|
1642
|
+
case "developer": {
|
|
1643
|
+
messages.push({ role: "developer", content });
|
|
1644
|
+
break;
|
|
1645
|
+
}
|
|
1646
|
+
case "remove": {
|
|
1647
|
+
warnings.push({
|
|
1648
|
+
type: "other",
|
|
1649
|
+
message: "system messages are removed for this model"
|
|
1650
|
+
});
|
|
1651
|
+
break;
|
|
1652
|
+
}
|
|
1653
|
+
default: {
|
|
1654
|
+
const _exhaustiveCheck = systemMessageMode;
|
|
1655
|
+
throw new Error(
|
|
1656
|
+
`Unsupported system message mode: ${_exhaustiveCheck}`
|
|
1657
|
+
);
|
|
1658
|
+
}
|
|
1659
|
+
}
|
|
1660
|
+
break;
|
|
1661
|
+
}
|
|
1662
|
+
case "user": {
|
|
1663
|
+
messages.push({
|
|
1664
|
+
role: "user",
|
|
1665
|
+
content: content.map((part) => {
|
|
1666
|
+
var _a, _b, _c;
|
|
1667
|
+
switch (part.type) {
|
|
1668
|
+
case "text": {
|
|
1669
|
+
return { type: "input_text", text: part.text };
|
|
1670
|
+
}
|
|
1671
|
+
case "image": {
|
|
1672
|
+
return {
|
|
1673
|
+
type: "input_image",
|
|
1674
|
+
image_url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase642(part.image)}`,
|
|
1675
|
+
// OpenAI specific extension: image detail
|
|
1676
|
+
detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
|
|
1677
|
+
};
|
|
1678
|
+
}
|
|
1679
|
+
case "file": {
|
|
1680
|
+
throw new UnsupportedFunctionalityError6({
|
|
1681
|
+
functionality: "Image content parts in user messages"
|
|
1682
|
+
});
|
|
1683
|
+
}
|
|
1684
|
+
}
|
|
1685
|
+
})
|
|
1686
|
+
});
|
|
1687
|
+
break;
|
|
1688
|
+
}
|
|
1689
|
+
case "assistant": {
|
|
1690
|
+
for (const part of content) {
|
|
1691
|
+
switch (part.type) {
|
|
1692
|
+
case "text": {
|
|
1693
|
+
messages.push({
|
|
1694
|
+
role: "assistant",
|
|
1695
|
+
content: [{ type: "output_text", text: part.text }]
|
|
1696
|
+
});
|
|
1697
|
+
break;
|
|
1698
|
+
}
|
|
1699
|
+
case "tool-call": {
|
|
1700
|
+
messages.push({
|
|
1701
|
+
type: "function_call",
|
|
1702
|
+
call_id: part.toolCallId,
|
|
1703
|
+
name: part.toolName,
|
|
1704
|
+
arguments: JSON.stringify(part.args)
|
|
1705
|
+
});
|
|
1706
|
+
break;
|
|
1707
|
+
}
|
|
1708
|
+
}
|
|
1709
|
+
}
|
|
1710
|
+
break;
|
|
1711
|
+
}
|
|
1712
|
+
case "tool": {
|
|
1713
|
+
for (const part of content) {
|
|
1714
|
+
messages.push({
|
|
1715
|
+
type: "function_call_output",
|
|
1716
|
+
call_id: part.toolCallId,
|
|
1717
|
+
output: JSON.stringify(part.result)
|
|
1718
|
+
});
|
|
1719
|
+
}
|
|
1720
|
+
break;
|
|
1721
|
+
}
|
|
1722
|
+
default: {
|
|
1723
|
+
const _exhaustiveCheck = role;
|
|
1724
|
+
throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
|
|
1725
|
+
}
|
|
1726
|
+
}
|
|
1727
|
+
}
|
|
1728
|
+
return { messages, warnings };
|
|
1729
|
+
}
|
|
1730
|
+
|
|
1731
|
+
// src/responses/map-openai-responses-finish-reason.ts
|
|
1732
|
+
function mapOpenAIResponseFinishReason({
|
|
1733
|
+
finishReason,
|
|
1734
|
+
hasToolCalls
|
|
1735
|
+
}) {
|
|
1736
|
+
switch (finishReason) {
|
|
1737
|
+
case void 0:
|
|
1738
|
+
case null:
|
|
1739
|
+
return hasToolCalls ? "tool-calls" : "stop";
|
|
1740
|
+
case "max_output_tokens":
|
|
1741
|
+
return "length";
|
|
1742
|
+
case "content_filter":
|
|
1743
|
+
return "content-filter";
|
|
1744
|
+
default:
|
|
1745
|
+
return hasToolCalls ? "tool-calls" : "unknown";
|
|
1746
|
+
}
|
|
1747
|
+
}
|
|
1748
|
+
|
|
1749
|
+
// src/responses/openai-responses-prepare-tools.ts
|
|
1750
|
+
import {
|
|
1751
|
+
UnsupportedFunctionalityError as UnsupportedFunctionalityError7
|
|
1752
|
+
} from "@ai-sdk/provider";
|
|
1753
|
+
function prepareResponsesTools({
|
|
1754
|
+
mode,
|
|
1755
|
+
strict
|
|
1756
|
+
}) {
|
|
1757
|
+
var _a;
|
|
1758
|
+
const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
|
|
1759
|
+
const toolWarnings = [];
|
|
1760
|
+
if (tools == null) {
|
|
1761
|
+
return { tools: void 0, tool_choice: void 0, toolWarnings };
|
|
1762
|
+
}
|
|
1763
|
+
const toolChoice = mode.toolChoice;
|
|
1764
|
+
const openaiTools2 = [];
|
|
1765
|
+
for (const tool of tools) {
|
|
1766
|
+
switch (tool.type) {
|
|
1767
|
+
case "function":
|
|
1768
|
+
openaiTools2.push({
|
|
1769
|
+
type: "function",
|
|
1770
|
+
name: tool.name,
|
|
1771
|
+
description: tool.description,
|
|
1772
|
+
parameters: tool.parameters,
|
|
1773
|
+
strict: strict ? true : void 0
|
|
1774
|
+
});
|
|
1775
|
+
break;
|
|
1776
|
+
case "provider-defined":
|
|
1777
|
+
switch (tool.id) {
|
|
1778
|
+
case "openai.web_search_preview":
|
|
1779
|
+
openaiTools2.push({
|
|
1780
|
+
type: "web_search_preview",
|
|
1781
|
+
search_context_size: tool.args.searchContextSize,
|
|
1782
|
+
user_location: tool.args.userLocation
|
|
1783
|
+
});
|
|
1784
|
+
break;
|
|
1785
|
+
default:
|
|
1786
|
+
toolWarnings.push({ type: "unsupported-tool", tool });
|
|
1787
|
+
break;
|
|
1788
|
+
}
|
|
1789
|
+
break;
|
|
1790
|
+
default:
|
|
1791
|
+
toolWarnings.push({ type: "unsupported-tool", tool });
|
|
1792
|
+
break;
|
|
1793
|
+
}
|
|
1794
|
+
}
|
|
1795
|
+
if (toolChoice == null) {
|
|
1796
|
+
return { tools: openaiTools2, tool_choice: void 0, toolWarnings };
|
|
1797
|
+
}
|
|
1798
|
+
const type = toolChoice.type;
|
|
1799
|
+
switch (type) {
|
|
1800
|
+
case "auto":
|
|
1801
|
+
case "none":
|
|
1802
|
+
case "required":
|
|
1803
|
+
return { tools: openaiTools2, tool_choice: type, toolWarnings };
|
|
1804
|
+
case "tool":
|
|
1805
|
+
return {
|
|
1806
|
+
tools: openaiTools2,
|
|
1807
|
+
tool_choice: {
|
|
1808
|
+
type: "function",
|
|
1809
|
+
name: toolChoice.toolName
|
|
1810
|
+
},
|
|
1811
|
+
toolWarnings
|
|
1812
|
+
};
|
|
1813
|
+
default: {
|
|
1814
|
+
const _exhaustiveCheck = type;
|
|
1815
|
+
throw new UnsupportedFunctionalityError7({
|
|
1816
|
+
functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
|
|
1817
|
+
});
|
|
1818
|
+
}
|
|
1819
|
+
}
|
|
1820
|
+
}
|
|
1821
|
+
|
|
1822
|
+
// src/responses/openai-responses-language-model.ts
|
|
1823
|
+
var OpenAIResponsesLanguageModel = class {
|
|
1824
|
+
constructor(modelId, config) {
|
|
1825
|
+
this.specificationVersion = "v1";
|
|
1826
|
+
this.defaultObjectGenerationMode = "json";
|
|
1827
|
+
this.modelId = modelId;
|
|
1828
|
+
this.config = config;
|
|
1829
|
+
}
|
|
1830
|
+
get provider() {
|
|
1831
|
+
return this.config.provider;
|
|
1832
|
+
}
|
|
1833
|
+
getArgs({
|
|
1834
|
+
mode,
|
|
1835
|
+
maxTokens,
|
|
1836
|
+
temperature,
|
|
1837
|
+
stopSequences,
|
|
1838
|
+
topP,
|
|
1839
|
+
topK,
|
|
1840
|
+
presencePenalty,
|
|
1841
|
+
frequencyPenalty,
|
|
1842
|
+
seed,
|
|
1843
|
+
prompt,
|
|
1844
|
+
providerMetadata,
|
|
1845
|
+
responseFormat
|
|
1846
|
+
}) {
|
|
1847
|
+
var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
|
|
1848
|
+
const warnings = [];
|
|
1849
|
+
const modelConfig = getResponsesModelConfig(this.modelId);
|
|
1850
|
+
const type = mode.type;
|
|
1851
|
+
if (topK != null) {
|
|
1852
|
+
warnings.push({
|
|
1853
|
+
type: "unsupported-setting",
|
|
1854
|
+
setting: "topK"
|
|
1855
|
+
});
|
|
1856
|
+
}
|
|
1857
|
+
if (seed != null) {
|
|
1858
|
+
warnings.push({
|
|
1859
|
+
type: "unsupported-setting",
|
|
1860
|
+
setting: "seed"
|
|
1861
|
+
});
|
|
1862
|
+
}
|
|
1863
|
+
if (presencePenalty != null) {
|
|
1864
|
+
warnings.push({
|
|
1865
|
+
type: "unsupported-setting",
|
|
1866
|
+
setting: "presencePenalty"
|
|
1867
|
+
});
|
|
1868
|
+
}
|
|
1869
|
+
if (frequencyPenalty != null) {
|
|
1870
|
+
warnings.push({
|
|
1871
|
+
type: "unsupported-setting",
|
|
1872
|
+
setting: "frequencyPenalty"
|
|
1873
|
+
});
|
|
1874
|
+
}
|
|
1875
|
+
if (stopSequences != null) {
|
|
1876
|
+
warnings.push({
|
|
1877
|
+
type: "unsupported-setting",
|
|
1878
|
+
setting: "stopSequences"
|
|
1879
|
+
});
|
|
1880
|
+
}
|
|
1881
|
+
const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
|
|
1882
|
+
prompt,
|
|
1883
|
+
systemMessageMode: modelConfig.systemMessageMode
|
|
1884
|
+
});
|
|
1885
|
+
warnings.push(...messageWarnings);
|
|
1886
|
+
const isStrictJsonSchema = (_b = (_a = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _a.strictJsonSchema) != null ? _b : true;
|
|
1887
|
+
const baseArgs = {
|
|
1888
|
+
model: this.modelId,
|
|
1889
|
+
input: messages,
|
|
1890
|
+
temperature,
|
|
1891
|
+
top_p: topP,
|
|
1892
|
+
max_output_tokens: maxTokens,
|
|
1893
|
+
...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
|
|
1894
|
+
text: {
|
|
1895
|
+
format: responseFormat.schema != null ? {
|
|
1896
|
+
type: "json_schema",
|
|
1897
|
+
strict: isStrictJsonSchema,
|
|
1898
|
+
name: (_c = responseFormat.name) != null ? _c : "response",
|
|
1899
|
+
description: responseFormat.description,
|
|
1900
|
+
schema: responseFormat.schema
|
|
1901
|
+
} : { type: "json_object" }
|
|
1902
|
+
}
|
|
1903
|
+
},
|
|
1904
|
+
// provider options:
|
|
1905
|
+
metadata: (_d = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _d.metadata,
|
|
1906
|
+
parallel_tool_calls: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.parallelToolCalls,
|
|
1907
|
+
previous_response_id: (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.previousResponseId,
|
|
1908
|
+
store: (_g = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _g.store,
|
|
1909
|
+
user: (_h = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _h.user,
|
|
1910
|
+
// model-specific settings:
|
|
1911
|
+
...modelConfig.isReasoningModel && ((_i = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _i.reasoningEffort) != null && {
|
|
1912
|
+
reasoning: { effort: (_j = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _j.reasoningEffort }
|
|
1913
|
+
},
|
|
1914
|
+
...modelConfig.requiredAutoTruncation && {
|
|
1915
|
+
truncation: "auto"
|
|
1916
|
+
}
|
|
1917
|
+
};
|
|
1918
|
+
if (modelConfig.isReasoningModel) {
|
|
1919
|
+
if (baseArgs.temperature != null) {
|
|
1920
|
+
baseArgs.temperature = void 0;
|
|
1921
|
+
warnings.push({
|
|
1922
|
+
type: "unsupported-setting",
|
|
1923
|
+
setting: "temperature",
|
|
1924
|
+
details: "temperature is not supported for reasoning models"
|
|
1925
|
+
});
|
|
1926
|
+
}
|
|
1927
|
+
if (baseArgs.top_p != null) {
|
|
1928
|
+
baseArgs.top_p = void 0;
|
|
1929
|
+
warnings.push({
|
|
1930
|
+
type: "unsupported-setting",
|
|
1931
|
+
setting: "topP",
|
|
1932
|
+
details: "topP is not supported for reasoning models"
|
|
1933
|
+
});
|
|
1934
|
+
}
|
|
1935
|
+
}
|
|
1936
|
+
switch (type) {
|
|
1937
|
+
case "regular": {
|
|
1938
|
+
const { tools, tool_choice, toolWarnings } = prepareResponsesTools({
|
|
1939
|
+
mode,
|
|
1940
|
+
strict: true
|
|
1941
|
+
});
|
|
1942
|
+
return {
|
|
1943
|
+
args: {
|
|
1944
|
+
...baseArgs,
|
|
1945
|
+
tools,
|
|
1946
|
+
tool_choice
|
|
1947
|
+
},
|
|
1948
|
+
warnings: [...warnings, ...toolWarnings]
|
|
1949
|
+
};
|
|
1950
|
+
}
|
|
1951
|
+
case "object-json": {
|
|
1952
|
+
return {
|
|
1953
|
+
args: {
|
|
1954
|
+
...baseArgs,
|
|
1955
|
+
text: {
|
|
1956
|
+
format: mode.schema != null ? {
|
|
1957
|
+
type: "json_schema",
|
|
1958
|
+
strict: isStrictJsonSchema,
|
|
1959
|
+
name: (_k = mode.name) != null ? _k : "response",
|
|
1960
|
+
description: mode.description,
|
|
1961
|
+
schema: mode.schema
|
|
1962
|
+
} : { type: "json_object" }
|
|
1963
|
+
}
|
|
1964
|
+
},
|
|
1965
|
+
warnings
|
|
1966
|
+
};
|
|
1967
|
+
}
|
|
1968
|
+
case "object-tool": {
|
|
1969
|
+
return {
|
|
1970
|
+
args: {
|
|
1971
|
+
...baseArgs,
|
|
1972
|
+
tool_choice: { type: "function", name: mode.tool.name },
|
|
1973
|
+
tools: [
|
|
1974
|
+
{
|
|
1975
|
+
type: "function",
|
|
1976
|
+
name: mode.tool.name,
|
|
1977
|
+
description: mode.tool.description,
|
|
1978
|
+
parameters: mode.tool.parameters,
|
|
1979
|
+
strict: isStrictJsonSchema
|
|
1980
|
+
}
|
|
1981
|
+
]
|
|
1982
|
+
},
|
|
1983
|
+
warnings
|
|
1984
|
+
};
|
|
1985
|
+
}
|
|
1986
|
+
default: {
|
|
1987
|
+
const _exhaustiveCheck = type;
|
|
1988
|
+
throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
|
|
1989
|
+
}
|
|
1990
|
+
}
|
|
1991
|
+
}
|
|
1992
|
+
async doGenerate(options) {
|
|
1993
|
+
var _a, _b, _c, _d, _e;
|
|
1994
|
+
const { args: body, warnings } = this.getArgs(options);
|
|
1995
|
+
const {
|
|
1996
|
+
responseHeaders,
|
|
1997
|
+
value: response,
|
|
1998
|
+
rawValue: rawResponse
|
|
1999
|
+
} = await postJsonToApi5({
|
|
2000
|
+
url: this.config.url({
|
|
2001
|
+
path: "/responses",
|
|
2002
|
+
modelId: this.modelId
|
|
2003
|
+
}),
|
|
2004
|
+
headers: combineHeaders5(this.config.headers(), options.headers),
|
|
2005
|
+
body,
|
|
2006
|
+
failedResponseHandler: openaiFailedResponseHandler,
|
|
2007
|
+
successfulResponseHandler: createJsonResponseHandler5(
|
|
2008
|
+
z6.object({
|
|
2009
|
+
id: z6.string(),
|
|
2010
|
+
created_at: z6.number(),
|
|
2011
|
+
model: z6.string(),
|
|
2012
|
+
output: z6.array(
|
|
2013
|
+
z6.discriminatedUnion("type", [
|
|
2014
|
+
z6.object({
|
|
2015
|
+
type: z6.literal("message"),
|
|
2016
|
+
role: z6.literal("assistant"),
|
|
2017
|
+
content: z6.array(
|
|
2018
|
+
z6.object({
|
|
2019
|
+
type: z6.literal("output_text"),
|
|
2020
|
+
text: z6.string(),
|
|
2021
|
+
annotations: z6.array(
|
|
2022
|
+
z6.object({
|
|
2023
|
+
type: z6.literal("url_citation"),
|
|
2024
|
+
start_index: z6.number(),
|
|
2025
|
+
end_index: z6.number(),
|
|
2026
|
+
url: z6.string(),
|
|
2027
|
+
title: z6.string()
|
|
2028
|
+
})
|
|
2029
|
+
)
|
|
2030
|
+
})
|
|
2031
|
+
)
|
|
2032
|
+
}),
|
|
2033
|
+
z6.object({
|
|
2034
|
+
type: z6.literal("function_call"),
|
|
2035
|
+
call_id: z6.string(),
|
|
2036
|
+
name: z6.string(),
|
|
2037
|
+
arguments: z6.string()
|
|
2038
|
+
}),
|
|
2039
|
+
z6.object({
|
|
2040
|
+
type: z6.literal("web_search_call")
|
|
2041
|
+
}),
|
|
2042
|
+
z6.object({
|
|
2043
|
+
type: z6.literal("computer_call")
|
|
2044
|
+
}),
|
|
2045
|
+
z6.object({
|
|
2046
|
+
type: z6.literal("reasoning")
|
|
2047
|
+
})
|
|
2048
|
+
])
|
|
2049
|
+
),
|
|
2050
|
+
incomplete_details: z6.object({ reason: z6.string() }).nullable(),
|
|
2051
|
+
usage: usageSchema
|
|
2052
|
+
})
|
|
2053
|
+
),
|
|
2054
|
+
abortSignal: options.abortSignal,
|
|
2055
|
+
fetch: this.config.fetch
|
|
2056
|
+
});
|
|
2057
|
+
const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
|
|
2058
|
+
const toolCalls = response.output.filter((output) => output.type === "function_call").map((output) => ({
|
|
2059
|
+
toolCallType: "function",
|
|
2060
|
+
toolCallId: output.call_id,
|
|
2061
|
+
toolName: output.name,
|
|
2062
|
+
args: output.arguments
|
|
2063
|
+
}));
|
|
2064
|
+
return {
|
|
2065
|
+
text: outputTextElements.map((content) => content.text).join("\n"),
|
|
2066
|
+
sources: outputTextElements.flatMap(
|
|
2067
|
+
(content) => content.annotations.map((annotation) => {
|
|
2068
|
+
var _a2, _b2, _c2;
|
|
2069
|
+
return {
|
|
2070
|
+
sourceType: "url",
|
|
2071
|
+
id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : generateId2(),
|
|
2072
|
+
url: annotation.url,
|
|
2073
|
+
title: annotation.title
|
|
2074
|
+
};
|
|
2075
|
+
})
|
|
2076
|
+
),
|
|
2077
|
+
finishReason: mapOpenAIResponseFinishReason({
|
|
2078
|
+
finishReason: (_a = response.incomplete_details) == null ? void 0 : _a.reason,
|
|
2079
|
+
hasToolCalls: toolCalls.length > 0
|
|
2080
|
+
}),
|
|
2081
|
+
toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
|
|
2082
|
+
usage: {
|
|
2083
|
+
promptTokens: response.usage.input_tokens,
|
|
2084
|
+
completionTokens: response.usage.output_tokens
|
|
2085
|
+
},
|
|
2086
|
+
rawCall: {
|
|
2087
|
+
rawPrompt: void 0,
|
|
2088
|
+
rawSettings: {}
|
|
2089
|
+
},
|
|
2090
|
+
rawResponse: {
|
|
2091
|
+
headers: responseHeaders,
|
|
2092
|
+
body: rawResponse
|
|
2093
|
+
},
|
|
2094
|
+
request: {
|
|
2095
|
+
body: JSON.stringify(body)
|
|
2096
|
+
},
|
|
2097
|
+
response: {
|
|
2098
|
+
id: response.id,
|
|
2099
|
+
timestamp: new Date(response.created_at * 1e3),
|
|
2100
|
+
modelId: response.model
|
|
2101
|
+
},
|
|
2102
|
+
providerMetadata: {
|
|
2103
|
+
openai: {
|
|
2104
|
+
responseId: response.id,
|
|
2105
|
+
cachedPromptTokens: (_c = (_b = response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : null,
|
|
2106
|
+
reasoningTokens: (_e = (_d = response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : null
|
|
2107
|
+
}
|
|
2108
|
+
},
|
|
2109
|
+
warnings
|
|
2110
|
+
};
|
|
2111
|
+
}
|
|
2112
|
+
async doStream(options) {
|
|
2113
|
+
const { args: body, warnings } = this.getArgs(options);
|
|
2114
|
+
const { responseHeaders, value: response } = await postJsonToApi5({
|
|
2115
|
+
url: this.config.url({
|
|
2116
|
+
path: "/responses",
|
|
2117
|
+
modelId: this.modelId
|
|
2118
|
+
}),
|
|
2119
|
+
headers: combineHeaders5(this.config.headers(), options.headers),
|
|
2120
|
+
body: {
|
|
2121
|
+
...body,
|
|
2122
|
+
stream: true
|
|
2123
|
+
},
|
|
2124
|
+
failedResponseHandler: openaiFailedResponseHandler,
|
|
2125
|
+
successfulResponseHandler: createEventSourceResponseHandler3(
|
|
2126
|
+
openaiResponsesChunkSchema
|
|
2127
|
+
),
|
|
2128
|
+
abortSignal: options.abortSignal,
|
|
2129
|
+
fetch: this.config.fetch
|
|
2130
|
+
});
|
|
2131
|
+
const self = this;
|
|
2132
|
+
let finishReason = "unknown";
|
|
2133
|
+
let promptTokens = NaN;
|
|
2134
|
+
let completionTokens = NaN;
|
|
2135
|
+
let cachedPromptTokens = null;
|
|
2136
|
+
let reasoningTokens = null;
|
|
2137
|
+
let responseId = null;
|
|
2138
|
+
const ongoingToolCalls = {};
|
|
2139
|
+
let hasToolCalls = false;
|
|
2140
|
+
return {
|
|
2141
|
+
stream: response.pipeThrough(
|
|
2142
|
+
new TransformStream({
|
|
2143
|
+
transform(chunk, controller) {
|
|
2144
|
+
var _a, _b, _c, _d, _e, _f, _g, _h;
|
|
2145
|
+
if (!chunk.success) {
|
|
2146
|
+
finishReason = "error";
|
|
2147
|
+
controller.enqueue({ type: "error", error: chunk.error });
|
|
2148
|
+
return;
|
|
2149
|
+
}
|
|
2150
|
+
const value = chunk.value;
|
|
2151
|
+
if (isResponseOutputItemAddedChunk(value)) {
|
|
2152
|
+
if (value.item.type === "function_call") {
|
|
2153
|
+
ongoingToolCalls[value.output_index] = {
|
|
2154
|
+
toolName: value.item.name,
|
|
2155
|
+
toolCallId: value.item.call_id
|
|
2156
|
+
};
|
|
2157
|
+
controller.enqueue({
|
|
2158
|
+
type: "tool-call-delta",
|
|
2159
|
+
toolCallType: "function",
|
|
2160
|
+
toolCallId: value.item.call_id,
|
|
2161
|
+
toolName: value.item.name,
|
|
2162
|
+
argsTextDelta: value.item.arguments
|
|
2163
|
+
});
|
|
2164
|
+
}
|
|
2165
|
+
} else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
|
|
2166
|
+
const toolCall = ongoingToolCalls[value.output_index];
|
|
2167
|
+
if (toolCall != null) {
|
|
2168
|
+
controller.enqueue({
|
|
2169
|
+
type: "tool-call-delta",
|
|
2170
|
+
toolCallType: "function",
|
|
2171
|
+
toolCallId: toolCall.toolCallId,
|
|
2172
|
+
toolName: toolCall.toolName,
|
|
2173
|
+
argsTextDelta: value.delta
|
|
2174
|
+
});
|
|
2175
|
+
}
|
|
2176
|
+
} else if (isResponseCreatedChunk(value)) {
|
|
2177
|
+
responseId = value.response.id;
|
|
2178
|
+
controller.enqueue({
|
|
2179
|
+
type: "response-metadata",
|
|
2180
|
+
id: value.response.id,
|
|
2181
|
+
timestamp: new Date(value.response.created_at * 1e3),
|
|
2182
|
+
modelId: value.response.model
|
|
2183
|
+
});
|
|
2184
|
+
} else if (isTextDeltaChunk(value)) {
|
|
2185
|
+
controller.enqueue({
|
|
2186
|
+
type: "text-delta",
|
|
2187
|
+
textDelta: value.delta
|
|
2188
|
+
});
|
|
2189
|
+
} else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
|
|
2190
|
+
ongoingToolCalls[value.output_index] = void 0;
|
|
2191
|
+
hasToolCalls = true;
|
|
2192
|
+
controller.enqueue({
|
|
2193
|
+
type: "tool-call",
|
|
2194
|
+
toolCallType: "function",
|
|
2195
|
+
toolCallId: value.item.call_id,
|
|
2196
|
+
toolName: value.item.name,
|
|
2197
|
+
args: value.item.arguments
|
|
2198
|
+
});
|
|
2199
|
+
} else if (isResponseFinishedChunk(value)) {
|
|
2200
|
+
finishReason = mapOpenAIResponseFinishReason({
|
|
2201
|
+
finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
|
|
2202
|
+
hasToolCalls
|
|
2203
|
+
});
|
|
2204
|
+
promptTokens = value.response.usage.input_tokens;
|
|
2205
|
+
completionTokens = value.response.usage.output_tokens;
|
|
2206
|
+
cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
|
|
2207
|
+
reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
|
|
2208
|
+
} else if (isResponseAnnotationAddedChunk(value)) {
|
|
2209
|
+
controller.enqueue({
|
|
2210
|
+
type: "source",
|
|
2211
|
+
source: {
|
|
2212
|
+
sourceType: "url",
|
|
2213
|
+
id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : generateId2(),
|
|
2214
|
+
url: value.annotation.url,
|
|
2215
|
+
title: value.annotation.title
|
|
2216
|
+
}
|
|
2217
|
+
});
|
|
2218
|
+
}
|
|
2219
|
+
},
|
|
2220
|
+
flush(controller) {
|
|
2221
|
+
controller.enqueue({
|
|
2222
|
+
type: "finish",
|
|
2223
|
+
finishReason,
|
|
2224
|
+
usage: { promptTokens, completionTokens },
|
|
2225
|
+
...(cachedPromptTokens != null || reasoningTokens != null) && {
|
|
2226
|
+
providerMetadata: {
|
|
2227
|
+
openai: {
|
|
2228
|
+
responseId,
|
|
2229
|
+
cachedPromptTokens,
|
|
2230
|
+
reasoningTokens
|
|
2231
|
+
}
|
|
2232
|
+
}
|
|
2233
|
+
}
|
|
2234
|
+
});
|
|
2235
|
+
}
|
|
2236
|
+
})
|
|
2237
|
+
),
|
|
2238
|
+
rawCall: {
|
|
2239
|
+
rawPrompt: void 0,
|
|
2240
|
+
rawSettings: {}
|
|
2241
|
+
},
|
|
2242
|
+
rawResponse: { headers: responseHeaders },
|
|
2243
|
+
request: { body: JSON.stringify(body) },
|
|
2244
|
+
warnings
|
|
2245
|
+
};
|
|
2246
|
+
}
|
|
2247
|
+
};
|
|
2248
|
+
var usageSchema = z6.object({
|
|
2249
|
+
input_tokens: z6.number(),
|
|
2250
|
+
input_tokens_details: z6.object({ cached_tokens: z6.number().nullish() }).nullish(),
|
|
2251
|
+
output_tokens: z6.number(),
|
|
2252
|
+
output_tokens_details: z6.object({ reasoning_tokens: z6.number().nullish() }).nullish()
|
|
2253
|
+
});
|
|
2254
|
+
var textDeltaChunkSchema = z6.object({
|
|
2255
|
+
type: z6.literal("response.output_text.delta"),
|
|
2256
|
+
delta: z6.string()
|
|
2257
|
+
});
|
|
2258
|
+
var responseFinishedChunkSchema = z6.object({
|
|
2259
|
+
type: z6.enum(["response.completed", "response.incomplete"]),
|
|
2260
|
+
response: z6.object({
|
|
2261
|
+
incomplete_details: z6.object({ reason: z6.string() }).nullish(),
|
|
2262
|
+
usage: usageSchema
|
|
2263
|
+
})
|
|
2264
|
+
});
|
|
2265
|
+
var responseCreatedChunkSchema = z6.object({
|
|
2266
|
+
type: z6.literal("response.created"),
|
|
2267
|
+
response: z6.object({
|
|
2268
|
+
id: z6.string(),
|
|
2269
|
+
created_at: z6.number(),
|
|
2270
|
+
model: z6.string()
|
|
2271
|
+
})
|
|
2272
|
+
});
|
|
2273
|
+
var responseOutputItemDoneSchema = z6.object({
|
|
2274
|
+
type: z6.literal("response.output_item.done"),
|
|
2275
|
+
output_index: z6.number(),
|
|
2276
|
+
item: z6.discriminatedUnion("type", [
|
|
2277
|
+
z6.object({
|
|
2278
|
+
type: z6.literal("message")
|
|
2279
|
+
}),
|
|
2280
|
+
z6.object({
|
|
2281
|
+
type: z6.literal("function_call"),
|
|
2282
|
+
id: z6.string(),
|
|
2283
|
+
call_id: z6.string(),
|
|
2284
|
+
name: z6.string(),
|
|
2285
|
+
arguments: z6.string(),
|
|
2286
|
+
status: z6.literal("completed")
|
|
2287
|
+
})
|
|
2288
|
+
])
|
|
2289
|
+
});
|
|
2290
|
+
// Incremental fragment of a function call's JSON argument string;
// `item_id`/`output_index` tie the delta back to its output item.
var responseFunctionCallArgumentsDeltaSchema = z6.object({
  type: z6.literal("response.function_call_arguments.delta"),
  item_id: z6.string(),
  output_index: z6.number(),
  delta: z6.string()
});
|
|
2296
|
+
// Emitted when a new output item starts. Shape mirrors the "done" event,
// except a just-added function call has no `status` yet (its arguments may
// still be streaming in via argument-delta chunks).
var responseOutputItemAddedSchema = z6.object({
  type: z6.literal("response.output_item.added"),
  output_index: z6.number(),
  item: z6.discriminatedUnion("type", [
    z6.object({
      type: z6.literal("message")
    }),
    z6.object({
      type: z6.literal("function_call"),
      id: z6.string(),
      call_id: z6.string(),
      name: z6.string(),
      arguments: z6.string()
    })
  ])
});
|
|
2312
|
+
// A URL citation attached to streamed output text (produced e.g. by the
// web search tool — presumably; confirm against the Responses API docs).
var responseAnnotationAddedSchema = z6.object({
  type: z6.literal("response.output_text.annotation.added"),
  annotation: z6.object({
    type: z6.literal("url_citation"),
    url: z6.string(),
    title: z6.string()
  })
});
|
|
2320
|
+
// Union of all recognized stream chunk shapes. zod tries union members in
// order, so the permissive passthrough fallback MUST stay last: it accepts
// any object with a string `type`, letting unknown event kinds parse
// without error (they are simply not narrowed by the is* guards below).
var openaiResponsesChunkSchema = z6.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
  responseOutputItemDoneSchema,
  responseFunctionCallArgumentsDeltaSchema,
  responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
  z6.object({ type: z6.string() }).passthrough()
  // fallback for unknown chunks
]);
|
|
2331
|
+
// Guard: narrows a parsed stream chunk to a text-delta event.
function isTextDeltaChunk(chunk) {
  const expectedType = "response.output_text.delta";
  return chunk.type === expectedType;
}
|
|
2334
|
+
// Guard: narrows a parsed stream chunk to an output-item-done event.
function isResponseOutputItemDoneChunk(chunk) {
  const expectedType = "response.output_item.done";
  return chunk.type === expectedType;
}
|
|
2337
|
+
// Guard: true for either terminal event — a completed or an incomplete response.
function isResponseFinishedChunk(chunk) {
  switch (chunk.type) {
    case "response.completed":
    case "response.incomplete":
      return true;
    default:
      return false;
  }
}
|
|
2340
|
+
// Guard: narrows a parsed stream chunk to the initial response-created event.
function isResponseCreatedChunk(chunk) {
  const expectedType = "response.created";
  return chunk.type === expectedType;
}
|
|
2343
|
+
// Guard: narrows a parsed stream chunk to a function-call-arguments delta.
function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
  const expectedType = "response.function_call_arguments.delta";
  return chunk.type === expectedType;
}
|
|
2346
|
+
// Guard: narrows a parsed stream chunk to an output-item-added event.
function isResponseOutputItemAddedChunk(chunk) {
  const expectedType = "response.output_item.added";
  return chunk.type === expectedType;
}
|
|
2349
|
+
// Guard: narrows a parsed stream chunk to a text-annotation (url citation) event.
function isResponseAnnotationAddedChunk(chunk) {
  const expectedType = "response.output_text.annotation.added";
  return chunk.type === expectedType;
}
|
|
2352
|
+
// Maps a model id to Responses-API handling flags:
// - isReasoningModel: o-series ("o1", "o3", ...) models reason.
// - systemMessageMode: how system prompts are delivered ("system" as-is,
//   "developer" role-renamed, "remove" dropped entirely).
// - requiredAutoTruncation: always false for the currently known models.
function getResponsesModelConfig(modelId) {
  // Non o-series models take plain system messages and do not reason.
  if (!modelId.startsWith("o")) {
    return {
      isReasoningModel: false,
      systemMessageMode: "system",
      requiredAutoTruncation: false
    };
  }
  // Legacy o1-mini / o1-preview accept neither system nor developer messages.
  const isLegacyO1 = modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview");
  return {
    isReasoningModel: true,
    systemMessageMode: isLegacyO1 ? "remove" : "developer",
    requiredAutoTruncation: false
  };
}
|
|
2373
|
+
|
|
2374
|
+
// src/openai-tools.ts
|
|
2375
|
+
import { z as z7 } from "zod";
|
|
2376
|
+
// Empty zod object: the web_search_preview tool declares no call arguments.
var WebSearchPreviewParameters = z7.object({});
|
|
2377
|
+
// Builds the provider-defined `web_search_preview` tool descriptor.
// Both options are optional and are forwarded verbatim as tool args;
// `parameters` is the (empty) zod schema for call arguments.
function webSearchPreviewTool(options = {}) {
  const { searchContextSize, userLocation } = options;
  return {
    type: "provider-defined",
    id: "openai.web_search_preview",
    args: {
      searchContextSize,
      userLocation
    },
    parameters: WebSearchPreviewParameters
  };
}
|
|
2391
|
+
// Registry of provider-defined tools, exposed below as `provider.tools`.
var openaiTools = {
  webSearchPreview: webSearchPreviewTool
};
|
|
2394
|
+
|
|
1602
2395
|
// src/openai-provider.ts
|
|
1603
2396
|
function createOpenAI(options = {}) {
|
|
1604
2397
|
var _a, _b, _c;
|
|
@@ -1655,17 +2448,27 @@ function createOpenAI(options = {}) {
|
|
|
1655
2448
|
}
|
|
1656
2449
|
return createChatModel(modelId, settings);
|
|
1657
2450
|
};
|
|
2451
|
+
const createResponsesModel = (modelId) => {
|
|
2452
|
+
return new OpenAIResponsesLanguageModel(modelId, {
|
|
2453
|
+
provider: `${providerName}.responses`,
|
|
2454
|
+
url: ({ path }) => `${baseURL}${path}`,
|
|
2455
|
+
headers: getHeaders,
|
|
2456
|
+
fetch: options.fetch
|
|
2457
|
+
});
|
|
2458
|
+
};
|
|
1658
2459
|
const provider = function(modelId, settings) {
|
|
1659
2460
|
return createLanguageModel(modelId, settings);
|
|
1660
2461
|
};
|
|
1661
2462
|
provider.languageModel = createLanguageModel;
|
|
1662
2463
|
provider.chat = createChatModel;
|
|
1663
2464
|
provider.completion = createCompletionModel;
|
|
2465
|
+
provider.responses = createResponsesModel;
|
|
1664
2466
|
provider.embedding = createEmbeddingModel;
|
|
1665
2467
|
provider.textEmbedding = createEmbeddingModel;
|
|
1666
2468
|
provider.textEmbeddingModel = createEmbeddingModel;
|
|
1667
2469
|
provider.image = createImageModel;
|
|
1668
2470
|
provider.imageModel = createImageModel;
|
|
2471
|
+
provider.tools = openaiTools;
|
|
1669
2472
|
return provider;
|
|
1670
2473
|
}
|
|
1671
2474
|
var openai = createOpenAI({
|