@mastra/server 0.0.0-vector-sources-20250516175436 → 0.0.0-vector-extension-schema-20250922130418
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +3745 -0
- package/LICENSE.md +11 -42
- package/README.md +0 -5
- package/dist/{chunk-H5PTF3Y4.js → chunk-4QCXUEAT.js} +11 -2
- package/dist/chunk-4QCXUEAT.js.map +1 -0
- package/dist/chunk-4RRMWXQ2.js +3522 -0
- package/dist/chunk-4RRMWXQ2.js.map +1 -0
- package/dist/chunk-57HWW2TY.cjs +587 -0
- package/dist/chunk-57HWW2TY.cjs.map +1 -0
- package/dist/chunk-5DP5XZH6.cjs +928 -0
- package/dist/chunk-5DP5XZH6.cjs.map +1 -0
- package/dist/chunk-66YYHFGF.js +761 -0
- package/dist/chunk-66YYHFGF.js.map +1 -0
- package/dist/chunk-6GMFZ5LK.js +2774 -0
- package/dist/chunk-6GMFZ5LK.js.map +1 -0
- package/dist/chunk-743UIDHI.cjs +2013 -0
- package/dist/chunk-743UIDHI.cjs.map +1 -0
- package/dist/chunk-7JYXPDM4.js +15712 -0
- package/dist/chunk-7JYXPDM4.js.map +1 -0
- package/dist/{chunk-OCWPVYNI.cjs → chunk-7NADHFD2.cjs} +3 -0
- package/dist/chunk-7NADHFD2.cjs.map +1 -0
- package/dist/chunk-7QEJ5QG5.js +151 -0
- package/dist/chunk-7QEJ5QG5.js.map +1 -0
- package/dist/chunk-A3AL7EWJ.js +83 -0
- package/dist/chunk-A3AL7EWJ.js.map +1 -0
- package/dist/chunk-AK2FXLLB.cjs +849 -0
- package/dist/chunk-AK2FXLLB.cjs.map +1 -0
- package/dist/{chunk-5SN4U5AC.cjs → chunk-AVEPEUN4.cjs} +115 -138
- package/dist/chunk-AVEPEUN4.cjs.map +1 -0
- package/dist/chunk-CNU4A2XU.js +129 -0
- package/dist/chunk-CNU4A2XU.js.map +1 -0
- package/dist/chunk-CY4TP3FK.js +16 -0
- package/dist/chunk-CY4TP3FK.js.map +1 -0
- package/dist/chunk-EMMSS5I5.cjs +37 -0
- package/dist/chunk-EMMSS5I5.cjs.map +1 -0
- package/dist/chunk-EMNGA4R4.js +845 -0
- package/dist/chunk-EMNGA4R4.js.map +1 -0
- package/dist/chunk-FALVL2VV.cjs +3525 -0
- package/dist/chunk-FALVL2VV.cjs.map +1 -0
- package/dist/chunk-FQNT7PI4.js +937 -0
- package/dist/chunk-FQNT7PI4.js.map +1 -0
- package/dist/chunk-G3PMV62Z.js +33 -0
- package/dist/chunk-G3PMV62Z.js.map +1 -0
- package/dist/chunk-G4PUALCE.cjs +28 -0
- package/dist/chunk-G4PUALCE.cjs.map +1 -0
- package/dist/chunk-G662L2YZ.js +568 -0
- package/dist/chunk-G662L2YZ.js.map +1 -0
- package/dist/chunk-GDWMF6SB.cjs +133 -0
- package/dist/chunk-GDWMF6SB.cjs.map +1 -0
- package/dist/chunk-GU4EWMZB.cjs +769 -0
- package/dist/chunk-GU4EWMZB.cjs.map +1 -0
- package/dist/chunk-GUI3CROV.cjs +159 -0
- package/dist/chunk-GUI3CROV.cjs.map +1 -0
- package/dist/chunk-HJQKWRKQ.cjs +764 -0
- package/dist/chunk-HJQKWRKQ.cjs.map +1 -0
- package/dist/{chunk-YWLUOY3D.cjs → chunk-HVBBFCDH.cjs} +1110 -793
- package/dist/chunk-HVBBFCDH.cjs.map +1 -0
- package/dist/chunk-HZJRQ5L3.cjs +1411 -0
- package/dist/chunk-HZJRQ5L3.cjs.map +1 -0
- package/dist/chunk-IGFMAZZ5.cjs +1150 -0
- package/dist/chunk-IGFMAZZ5.cjs.map +1 -0
- package/dist/chunk-ILESGJ6N.js +524 -0
- package/dist/chunk-ILESGJ6N.js.map +1 -0
- package/dist/chunk-IOQGI4ML.js +931 -0
- package/dist/chunk-IOQGI4ML.js.map +1 -0
- package/dist/chunk-J7BPKKOG.cjs +163 -0
- package/dist/chunk-J7BPKKOG.cjs.map +1 -0
- package/dist/{chunk-HFWCEP5S.js → chunk-JRDEOHAJ.js} +47 -14
- package/dist/chunk-JRDEOHAJ.js.map +1 -0
- package/dist/chunk-KNGXRN26.cjs +335 -0
- package/dist/chunk-KNGXRN26.cjs.map +1 -0
- package/dist/{chunk-OR3CIE2H.js → chunk-KV6VHX4V.js} +29 -7
- package/dist/chunk-KV6VHX4V.js.map +1 -0
- package/dist/chunk-L265APUD.cjs +69 -0
- package/dist/chunk-L265APUD.cjs.map +1 -0
- package/dist/chunk-LF2ZLOFP.js +767 -0
- package/dist/chunk-LF2ZLOFP.js.map +1 -0
- package/dist/chunk-LYPU75T6.js +1147 -0
- package/dist/chunk-LYPU75T6.js.map +1 -0
- package/dist/{chunk-NYN7KFXL.js → chunk-MMROOK5J.js} +3 -0
- package/dist/chunk-MMROOK5J.js.map +1 -0
- package/dist/chunk-N35YCWQ5.cjs +540 -0
- package/dist/chunk-N35YCWQ5.cjs.map +1 -0
- package/dist/{chunk-LIVAK2DM.js → chunk-N7F33WAD.js} +1083 -794
- package/dist/chunk-N7F33WAD.js.map +1 -0
- package/dist/chunk-NG5IVLEZ.js +1012 -0
- package/dist/chunk-NG5IVLEZ.js.map +1 -0
- package/dist/chunk-NLWACBE7.cjs +128 -0
- package/dist/chunk-NLWACBE7.cjs.map +1 -0
- package/dist/chunk-OGW6HHVI.js +1408 -0
- package/dist/chunk-OGW6HHVI.js.map +1 -0
- package/dist/chunk-OJQOYXHU.cjs +15748 -0
- package/dist/chunk-OJQOYXHU.cjs.map +1 -0
- package/dist/chunk-OZLRIVC4.cjs +588 -0
- package/dist/chunk-OZLRIVC4.cjs.map +1 -0
- package/dist/chunk-P7CIEIJ3.js +925 -0
- package/dist/chunk-P7CIEIJ3.js.map +1 -0
- package/dist/chunk-P7RBMCBE.cjs +934 -0
- package/dist/chunk-P7RBMCBE.cjs.map +1 -0
- package/dist/chunk-PPYGWINI.cjs +2777 -0
- package/dist/chunk-PPYGWINI.cjs.map +1 -0
- package/dist/{chunk-P6SCPDYW.js → chunk-PUYSH3IL.js} +114 -137
- package/dist/chunk-PUYSH3IL.js.map +1 -0
- package/dist/{chunk-MHKNLNAN.cjs → chunk-PWTXZZTR.cjs} +33 -10
- package/dist/chunk-PWTXZZTR.cjs.map +1 -0
- package/dist/chunk-R7NOGUZG.js +65 -0
- package/dist/chunk-R7NOGUZG.js.map +1 -0
- package/dist/chunk-RCHEPTZZ.js +2006 -0
- package/dist/chunk-RCHEPTZZ.js.map +1 -0
- package/dist/chunk-RE4RPXT2.cjs +18 -0
- package/dist/chunk-RE4RPXT2.cjs.map +1 -0
- package/dist/chunk-SIGXR3JT.cjs +1043 -0
- package/dist/chunk-SIGXR3JT.cjs.map +1 -0
- package/dist/chunk-SPLSYTYW.cjs +88 -0
- package/dist/chunk-SPLSYTYW.cjs.map +1 -0
- package/dist/chunk-SQY4T6EJ.js +571 -0
- package/dist/chunk-SQY4T6EJ.js.map +1 -0
- package/dist/{chunk-TJKLBTFB.js → chunk-SYRRSBGL.js} +51 -27
- package/dist/chunk-SYRRSBGL.js.map +1 -0
- package/dist/{chunk-BNEY4P4P.cjs → chunk-T3TIA3O6.cjs} +20 -18
- package/dist/chunk-T3TIA3O6.cjs.map +1 -0
- package/dist/{chunk-EJO45KYT.js → chunk-TTHEEIZ3.js} +53 -50
- package/dist/chunk-TTHEEIZ3.js.map +1 -0
- package/dist/chunk-TVSIG4JE.cjs +940 -0
- package/dist/chunk-TVSIG4JE.cjs.map +1 -0
- package/dist/{chunk-55DOQLP6.js → chunk-WHN4VX55.js} +5 -3
- package/dist/chunk-WHN4VX55.js.map +1 -0
- package/dist/dist-26HWEQY6.js +3 -0
- package/dist/dist-26HWEQY6.js.map +1 -0
- package/dist/dist-3A5DXB37.cjs +20 -0
- package/dist/dist-3A5DXB37.cjs.map +1 -0
- package/dist/dist-3SJKQJGY.cjs +16 -0
- package/dist/dist-3SJKQJGY.cjs.map +1 -0
- package/dist/dist-4ZQSPE5K.js +3 -0
- package/dist/dist-4ZQSPE5K.js.map +1 -0
- package/dist/dist-5W5QNRTD.js +3 -0
- package/dist/dist-5W5QNRTD.js.map +1 -0
- package/dist/dist-653SRMPL.js +3 -0
- package/dist/dist-653SRMPL.js.map +1 -0
- package/dist/dist-6U6EFC5C.cjs +16 -0
- package/dist/dist-6U6EFC5C.cjs.map +1 -0
- package/dist/dist-7IHNNYMF.cjs +16 -0
- package/dist/dist-7IHNNYMF.cjs.map +1 -0
- package/dist/dist-B5IPRF6W.js +3 -0
- package/dist/dist-B5IPRF6W.js.map +1 -0
- package/dist/dist-EOMYFT4Y.cjs +16 -0
- package/dist/dist-EOMYFT4Y.cjs.map +1 -0
- package/dist/dist-EZZMMMNT.cjs +16 -0
- package/dist/dist-EZZMMMNT.cjs.map +1 -0
- package/dist/dist-F2ET4MNO.cjs +16 -0
- package/dist/dist-F2ET4MNO.cjs.map +1 -0
- package/dist/dist-H64VX6DE.js +3 -0
- package/dist/dist-H64VX6DE.js.map +1 -0
- package/dist/dist-HY7RMLJQ.cjs +16 -0
- package/dist/dist-HY7RMLJQ.cjs.map +1 -0
- package/dist/dist-M6S4P3FJ.js +3 -0
- package/dist/dist-M6S4P3FJ.js.map +1 -0
- package/dist/dist-NR7QSCQT.js +3 -0
- package/dist/dist-NR7QSCQT.js.map +1 -0
- package/dist/dist-QLFMCMCX.js +3 -0
- package/dist/dist-QLFMCMCX.js.map +1 -0
- package/dist/dist-UY46BFRP.js +3 -0
- package/dist/dist-UY46BFRP.js.map +1 -0
- package/dist/dist-WCQDRTIV.cjs +16 -0
- package/dist/dist-WCQDRTIV.cjs.map +1 -0
- package/dist/dist-WKYB3LTJ.cjs +16 -0
- package/dist/dist-WKYB3LTJ.cjs.map +1 -0
- package/dist/index.cjs +6 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.ts +1 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +5 -0
- package/dist/index.js.map +1 -0
- package/dist/server/a2a/protocol.d.ts +8 -0
- package/dist/server/a2a/protocol.d.ts.map +1 -0
- package/dist/server/a2a/store.cjs +25 -0
- package/dist/server/a2a/store.cjs.map +1 -0
- package/dist/server/a2a/store.d.ts +14 -0
- package/dist/server/a2a/store.d.ts.map +1 -0
- package/dist/server/a2a/store.js +23 -0
- package/dist/server/a2a/store.js.map +1 -0
- package/dist/server/a2a/tasks.d.ts +20 -0
- package/dist/server/a2a/tasks.d.ts.map +1 -0
- package/dist/server/handlers/a2a.cjs +13 -11
- package/dist/server/handlers/a2a.cjs.map +1 -0
- package/dist/server/handlers/a2a.d.ts +68 -6
- package/dist/server/handlers/a2a.d.ts.map +1 -0
- package/dist/server/handlers/a2a.js +3 -1
- package/dist/server/handlers/a2a.js.map +1 -0
- package/dist/server/handlers/agent-builder.cjs +68 -0
- package/dist/server/handlers/agent-builder.cjs.map +1 -0
- package/dist/server/handlers/agent-builder.d.ts +88 -0
- package/dist/server/handlers/agent-builder.d.ts.map +1 -0
- package/dist/server/handlers/agent-builder.js +3 -0
- package/dist/server/handlers/agent-builder.js.map +1 -0
- package/dist/server/handlers/agents.cjs +41 -7
- package/dist/server/handlers/agents.cjs.map +1 -0
- package/dist/server/handlers/agents.d.ts +139 -6
- package/dist/server/handlers/agents.d.ts.map +1 -0
- package/dist/server/handlers/agents.js +3 -1
- package/dist/server/handlers/agents.js.map +1 -0
- package/dist/server/handlers/error.cjs +4 -2
- package/dist/server/handlers/error.cjs.map +1 -0
- package/dist/server/handlers/error.d.ts +2 -1
- package/dist/server/handlers/error.d.ts.map +1 -0
- package/dist/server/handlers/error.js +3 -1
- package/dist/server/handlers/error.js.map +1 -0
- package/dist/server/handlers/legacyWorkflows.cjs +48 -0
- package/dist/server/handlers/legacyWorkflows.cjs.map +1 -0
- package/dist/server/handlers/legacyWorkflows.d.ts +59 -0
- package/dist/server/handlers/legacyWorkflows.d.ts.map +1 -0
- package/dist/server/handlers/legacyWorkflows.js +3 -0
- package/dist/server/handlers/legacyWorkflows.js.map +1 -0
- package/dist/server/handlers/logs.cjs +6 -4
- package/dist/server/handlers/logs.cjs.map +1 -0
- package/dist/server/handlers/logs.d.ts +34 -3
- package/dist/server/handlers/logs.d.ts.map +1 -0
- package/dist/server/handlers/logs.js +3 -1
- package/dist/server/handlers/logs.js.map +1 -0
- package/dist/server/handlers/memory.cjs +39 -9
- package/dist/server/handlers/memory.cjs.map +1 -0
- package/dist/server/handlers/memory.d.ts +118 -8
- package/dist/server/handlers/memory.d.ts.map +1 -0
- package/dist/server/handlers/memory.js +3 -1
- package/dist/server/handlers/memory.js.map +1 -0
- package/dist/server/handlers/observability.cjs +16 -0
- package/dist/server/handlers/observability.cjs.map +1 -0
- package/dist/server/handlers/observability.d.ts +23 -0
- package/dist/server/handlers/observability.d.ts.map +1 -0
- package/dist/server/handlers/observability.js +3 -0
- package/dist/server/handlers/observability.js.map +1 -0
- package/dist/server/handlers/scores.cjs +32 -0
- package/dist/server/handlers/scores.cjs.map +1 -0
- package/dist/server/handlers/scores.d.ts +49 -0
- package/dist/server/handlers/scores.d.ts.map +1 -0
- package/dist/server/handlers/scores.js +3 -0
- package/dist/server/handlers/scores.js.map +1 -0
- package/dist/server/handlers/telemetry.cjs +9 -3
- package/dist/server/handlers/telemetry.cjs.map +1 -0
- package/dist/server/handlers/telemetry.d.ts +33 -2
- package/dist/server/handlers/telemetry.d.ts.map +1 -0
- package/dist/server/handlers/telemetry.js +3 -1
- package/dist/server/handlers/telemetry.js.map +1 -0
- package/dist/server/handlers/tools.cjs +11 -5
- package/dist/server/handlers/tools.cjs.map +1 -0
- package/dist/server/handlers/tools.d.ts +25 -4
- package/dist/server/handlers/tools.d.ts.map +1 -0
- package/dist/server/handlers/tools.js +3 -1
- package/dist/server/handlers/tools.js.map +1 -0
- package/dist/server/handlers/utils.cjs +8 -2
- package/dist/server/handlers/utils.cjs.map +1 -0
- package/dist/server/handlers/utils.d.ts +8 -1
- package/dist/server/handlers/utils.d.ts.map +1 -0
- package/dist/server/handlers/utils.js +3 -1
- package/dist/server/handlers/utils.js.map +1 -0
- package/dist/server/handlers/vNextNetwork.cjs +220 -0
- package/dist/server/handlers/vNextNetwork.cjs.map +1 -0
- package/dist/server/handlers/vNextNetwork.d.ts +246 -0
- package/dist/server/handlers/vNextNetwork.d.ts.map +1 -0
- package/dist/server/handlers/vNextNetwork.js +213 -0
- package/dist/server/handlers/vNextNetwork.js.map +1 -0
- package/dist/server/handlers/vector.cjs +9 -7
- package/dist/server/handlers/vector.cjs.map +1 -0
- package/dist/server/handlers/vector.d.ts +51 -6
- package/dist/server/handlers/vector.d.ts.map +1 -0
- package/dist/server/handlers/vector.js +3 -1
- package/dist/server/handlers/vector.js.map +1 -0
- package/dist/server/handlers/voice.cjs +10 -4
- package/dist/server/handlers/voice.cjs.map +1 -0
- package/dist/server/handlers/voice.d.ts +41 -3
- package/dist/server/handlers/voice.d.ts.map +1 -0
- package/dist/server/handlers/voice.js +3 -1
- package/dist/server/handlers/voice.js.map +1 -0
- package/dist/server/handlers/workflows.cjs +43 -13
- package/dist/server/handlers/workflows.cjs.map +1 -0
- package/dist/server/handlers/workflows.d.ts +82 -10
- package/dist/server/handlers/workflows.d.ts.map +1 -0
- package/dist/server/handlers/workflows.js +3 -1
- package/dist/server/handlers/workflows.js.map +1 -0
- package/dist/server/handlers.cjs +44 -32
- package/dist/server/handlers.cjs.map +1 -0
- package/dist/server/handlers.d.ts +14 -11
- package/dist/server/handlers.d.ts.map +1 -0
- package/dist/server/handlers.js +15 -11
- package/dist/server/handlers.js.map +1 -0
- package/dist/server/http-exception.d.ts +87 -0
- package/dist/server/http-exception.d.ts.map +1 -0
- package/dist/server/types.d.ts +10 -0
- package/dist/server/types.d.ts.map +1 -0
- package/dist/server/utils.d.ts +44 -0
- package/dist/server/utils.d.ts.map +1 -0
- package/package.json +51 -21
- package/dist/_tsup-dts-rollup.d.cts +0 -816
- package/dist/_tsup-dts-rollup.d.ts +0 -816
- package/dist/chunk-57CJTIPW.cjs +0 -18
- package/dist/chunk-64U3UDTH.cjs +0 -13
- package/dist/chunk-75ZPJI57.cjs +0 -9
- package/dist/chunk-C7564HUT.js +0 -142
- package/dist/chunk-D4IRYCUI.cjs +0 -235
- package/dist/chunk-DJJIUEL2.js +0 -211
- package/dist/chunk-HWZVAG3H.js +0 -49
- package/dist/chunk-I2B73Y4I.cjs +0 -332
- package/dist/chunk-M5ABIP7D.js +0 -11
- package/dist/chunk-MIQYDLLM.js +0 -329
- package/dist/chunk-MLKGABMK.js +0 -7
- package/dist/chunk-OGCNNUHF.cjs +0 -54
- package/dist/chunk-UCTEMO2Q.cjs +0 -341
- package/dist/chunk-VPNDC2DI.cjs +0 -148
- package/dist/chunk-WUC6LSTW.js +0 -227
- package/dist/chunk-Y7UWRW5X.cjs +0 -221
- package/dist/chunk-YBVOQN4M.cjs +0 -94
- package/dist/chunk-ZE5AAC4I.cjs +0 -138
- package/dist/index.d.cts +0 -1
- package/dist/server/handlers/a2a.d.cts +0 -6
- package/dist/server/handlers/agents.d.cts +0 -6
- package/dist/server/handlers/error.d.cts +0 -1
- package/dist/server/handlers/logs.d.cts +0 -3
- package/dist/server/handlers/memory.d.cts +0 -8
- package/dist/server/handlers/network.cjs +0 -22
- package/dist/server/handlers/network.d.cts +0 -4
- package/dist/server/handlers/network.d.ts +0 -4
- package/dist/server/handlers/network.js +0 -1
- package/dist/server/handlers/telemetry.d.cts +0 -2
- package/dist/server/handlers/tools.d.cts +0 -4
- package/dist/server/handlers/utils.d.cts +0 -1
- package/dist/server/handlers/vNextWorkflows.cjs +0 -46
- package/dist/server/handlers/vNextWorkflows.d.cts +0 -10
- package/dist/server/handlers/vNextWorkflows.d.ts +0 -10
- package/dist/server/handlers/vNextWorkflows.js +0 -1
- package/dist/server/handlers/vector.d.cts +0 -6
- package/dist/server/handlers/voice.d.cts +0 -3
- package/dist/server/handlers/workflows.d.cts +0 -10
- package/dist/server/handlers.d.cts +0 -11
|
@@ -0,0 +1,3522 @@
|
|
|
1
|
+
import { createJsonErrorResponseHandler, createProviderDefinedToolFactory, withoutTrailingSlash, loadApiKey, parseProviderOptions, postJsonToApi, createJsonResponseHandler, combineHeaders, APICallError, generateId, createEventSourceResponseHandler, convertToBase64, UnsupportedFunctionalityError, InvalidResponseDataError, isParsableJson, TooManyEmbeddingValuesForCallError, convertBase64ToUint8Array, postFormDataToApi, createBinaryResponseHandler, InvalidPromptError } from './chunk-N7F33WAD.js';
|
|
2
|
+
import { z } from 'zod/v4';
|
|
3
|
+
|
|
4
|
+
// Shape of the JSON error payload returned by the OpenAI API. The extra
// fields are typed loosely on purpose so that OpenAI-compatible providers
// with slightly different error bodies still parse successfully.
var openaiErrorDataSchema = z.object({
  error: z.object({
    message: z.string(),
    // The additional information below is handled loosely to support
    // OpenAI-compatible providers that have slightly different error
    // responses:
    type: z.string().nullish(),
    param: z.any().nullish(),
    code: z.union([z.string(), z.number()]).nullish()
  })
});
// Failed-response handler for OpenAI endpoints: parses the body with the
// schema above and surfaces `error.message` as the error text.
var openaiFailedResponseHandler = createJsonErrorResponseHandler({
  errorSchema: openaiErrorDataSchema,
  errorToMessage: (data) => data.error.message
});
|
19
|
+
/**
 * Converts an AI-SDK prompt (array of { role, content } messages) into the
 * OpenAI Chat Completions `messages` wire format.
 *
 * @param {object} options
 * @param {Array} options.prompt - AI-SDK prompt messages.
 * @param {"system"|"developer"|"remove"} [options.systemMessageMode="system"] -
 *   How to emit system messages: as `system`, as `developer` (reasoning
 *   models), or dropped entirely (with a warning).
 * @returns {{ messages: Array, warnings: Array }} OpenAI messages plus any
 *   non-fatal conversion warnings.
 * @throws {UnsupportedFunctionalityError} for file/audio content the Chat
 *   Completions API cannot accept.
 */
function convertToOpenAIChatMessages({
  prompt,
  systemMessageMode = "system"
}) {
  const messages = [];
  const warnings = [];
  for (const { role, content } of prompt) {
    switch (role) {
      case "system": {
        switch (systemMessageMode) {
          case "system": {
            messages.push({ role: "system", content });
            break;
          }
          case "developer": {
            messages.push({ role: "developer", content });
            break;
          }
          case "remove": {
            // System messages are silently dropped for this model; record a
            // warning so callers can surface it.
            warnings.push({
              type: "other",
              message: "system messages are removed for this model"
            });
            break;
          }
          default: {
            // Exhaustiveness guard: any new mode must be handled above.
            const _exhaustiveCheck = systemMessageMode;
            throw new Error(
              `Unsupported system message mode: ${_exhaustiveCheck}`
            );
          }
        }
        break;
      }
      case "user": {
        // Fast path: a single text part collapses to a plain string content.
        if (content.length === 1 && content[0].type === "text") {
          messages.push({ role: "user", content: content[0].text });
          break;
        }
        messages.push({
          role: "user",
          content: content.map((part, index) => {
            var _a, _b, _c;
            switch (part.type) {
              case "text": {
                return { type: "text", text: part.text };
              }
              case "file": {
                if (part.mediaType.startsWith("image/")) {
                  // "image/*" is a wildcard; default it to JPEG for the data URL.
                  const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
                  return {
                    type: "image_url",
                    image_url: {
                      // URLs pass through untouched; raw data becomes a base64 data URL.
                      url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${convertToBase64(part.data)}`,
                      // OpenAI specific extension: image detail
                      detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
                    }
                  };
                } else if (part.mediaType.startsWith("audio/")) {
                  // Chat Completions only accepts inline (base64) audio.
                  if (part.data instanceof URL) {
                    throw new UnsupportedFunctionalityError({
                      functionality: "audio file parts with URLs"
                    });
                  }
                  switch (part.mediaType) {
                    case "audio/wav": {
                      return {
                        type: "input_audio",
                        input_audio: {
                          data: convertToBase64(part.data),
                          format: "wav"
                        }
                      };
                    }
                    case "audio/mp3":
                    case "audio/mpeg": {
                      return {
                        type: "input_audio",
                        input_audio: {
                          data: convertToBase64(part.data),
                          format: "mp3"
                        }
                      };
                    }
                    default: {
                      throw new UnsupportedFunctionalityError({
                        functionality: `audio content parts with media type ${part.mediaType}`
                      });
                    }
                  }
                } else if (part.mediaType === "application/pdf") {
                  if (part.data instanceof URL) {
                    throw new UnsupportedFunctionalityError({
                      functionality: "PDF file parts with URLs"
                    });
                  }
                  return {
                    type: "file",
                    // Strings starting with "file-" are treated as previously
                    // uploaded OpenAI file IDs; everything else is inlined as
                    // a base64 data URL with a synthesized filename fallback.
                    file: typeof part.data === "string" && part.data.startsWith("file-") ? { file_id: part.data } : {
                      filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
                      file_data: `data:application/pdf;base64,${convertToBase64(part.data)}`
                    }
                  };
                } else {
                  throw new UnsupportedFunctionalityError({
                    functionality: `file part media type ${part.mediaType}`
                  });
                }
              }
            }
          })
        });
        break;
      }
      case "assistant": {
        // Flatten assistant text parts into one string and collect tool calls.
        let text = "";
        const toolCalls = [];
        for (const part of content) {
          switch (part.type) {
            case "text": {
              text += part.text;
              break;
            }
            case "tool-call": {
              toolCalls.push({
                id: part.toolCallId,
                type: "function",
                function: {
                  name: part.toolName,
                  arguments: JSON.stringify(part.input)
                }
              });
              break;
            }
          }
        }
        messages.push({
          role: "assistant",
          content: text,
          // Omit the field entirely when there are no tool calls.
          tool_calls: toolCalls.length > 0 ? toolCalls : void 0
        });
        break;
      }
      case "tool": {
        // Each tool response becomes its own `tool` message keyed by call ID.
        for (const toolResponse of content) {
          const output = toolResponse.output;
          let contentValue;
          switch (output.type) {
            case "text":
            case "error-text":
              contentValue = output.value;
              break;
            case "content":
            case "json":
            case "error-json":
              // Structured outputs are serialized to JSON text for the API.
              contentValue = JSON.stringify(output.value);
              break;
          }
          messages.push({
            role: "tool",
            tool_call_id: toolResponse.toolCallId,
            content: contentValue
          });
        }
        break;
      }
      default: {
        // Exhaustiveness guard for unknown roles.
        const _exhaustiveCheck = role;
        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
      }
    }
  }
  return { messages, warnings };
}
|
|
193
|
+
/**
 * Extracts normalized response metadata from an OpenAI API response.
 *
 * @param {object} response
 * @param {string|null|undefined} response.id - Provider response ID.
 * @param {string|null|undefined} response.model - Model identifier echoed back.
 * @param {number|null|undefined} response.created - Unix timestamp in seconds.
 * @returns {{ id: (string|undefined), modelId: (string|undefined), timestamp: (Date|undefined) }}
 */
function getResponseMetadata({
  id,
  model,
  created
}) {
  // Nullish inputs are normalized to undefined so consumers always see the
  // same three-key shape. `created` is seconds, Date wants milliseconds.
  return {
    id: id == null ? void 0 : id,
    modelId: model == null ? void 0 : model,
    timestamp: created == null ? void 0 : new Date(created * 1000)
  };
}
|
|
204
|
+
/**
 * Maps an OpenAI `finish_reason` string onto the AI-SDK finish-reason
 * vocabulary. Anything unrecognized (including null/undefined) maps to
 * "unknown".
 *
 * @param {string|null|undefined} finishReason - Raw value from the API.
 * @returns {"stop"|"length"|"content-filter"|"tool-calls"|"unknown"}
 */
function mapOpenAIFinishReason(finishReason) {
  if (finishReason === "stop") {
    return "stop";
  }
  if (finishReason === "length") {
    return "length";
  }
  if (finishReason === "content_filter") {
    return "content-filter";
  }
  // Both the legacy function-calling and current tool-calling finish
  // reasons collapse to the same SDK value.
  if (finishReason === "function_call" || finishReason === "tool_calls") {
    return "tool-calls";
  }
  return "unknown";
}
|
|
219
|
+
// OpenAI-specific request options, validated from `providerOptions.openai`.
// All fields are optional; unspecified fields fall back to API defaults.
var openaiProviderOptions = z.object({
  /**
   * Modify the likelihood of specified tokens appearing in the completion.
   *
   * Accepts a JSON object that maps tokens (specified by their token ID in
   * the GPT tokenizer) to an associated bias value from -100 to 100.
   */
  logitBias: z.record(z.coerce.number(), z.number()).optional(),
  /**
   * Return the log probabilities of the tokens.
   *
   * Setting to true will return the log probabilities of the tokens that
   * were generated.
   *
   * Setting to a number will return the log probabilities of the top n
   * tokens that were generated.
   */
  logprobs: z.union([z.boolean(), z.number()]).optional(),
  /**
   * Whether to enable parallel function calling during tool use. Default to true.
   */
  parallelToolCalls: z.boolean().optional(),
  /**
   * A unique identifier representing your end-user, which can help OpenAI to
   * monitor and detect abuse.
   */
  user: z.string().optional(),
  /**
   * Reasoning effort for reasoning models. Defaults to `medium`.
   */
  reasoningEffort: z.enum(["minimal", "low", "medium", "high"]).optional(),
  /**
   * Maximum number of completion tokens to generate. Useful for reasoning models.
   */
  maxCompletionTokens: z.number().optional(),
  /**
   * Whether to enable persistence in responses API.
   */
  store: z.boolean().optional(),
  /**
   * Metadata to associate with the request.
   */
  // OpenAI limits: keys up to 64 chars, values up to 512 chars.
  metadata: z.record(z.string().max(64), z.string().max(512)).optional(),
  /**
   * Parameters for prediction mode.
   */
  prediction: z.record(z.string(), z.any()).optional(),
  /**
   * Whether to use structured outputs.
   *
   * @default true
   */
  structuredOutputs: z.boolean().optional(),
  /**
   * Service tier for the request.
   * - 'auto': Default service tier
   * - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.
   * - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
   *
   * @default 'auto'
   */
  serviceTier: z.enum(["auto", "flex", "priority"]).optional(),
  /**
   * Whether to use strict JSON schema validation.
   *
   * @default false
   */
  strictJsonSchema: z.boolean().optional(),
  /**
   * Controls the verbosity of the model's responses.
   * Lower values will result in more concise responses, while higher values will result in more verbose responses.
   */
  textVerbosity: z.enum(["low", "medium", "high"]).optional(),
  /**
   * A cache key for prompt caching. Allows manual control over prompt caching behavior.
   * Useful for improving cache hit rates and working around automatic caching issues.
   */
  promptCacheKey: z.string().optional(),
  /**
   * A stable identifier used to help detect users of your application
   * that may be violating OpenAI's usage policies. The IDs should be a
   * string that uniquely identifies each user. We recommend hashing their
   * username or email address, in order to avoid sending us any identifying
   * information.
   */
  safetyIdentifier: z.string().optional()
});
|
|
306
|
+
// A single attribute comparison used by file-search filtering:
// `key <op> value`, where op is one of eq/ne/gt/gte/lt/lte.
var comparisonFilterSchema = z.object({
  key: z.string(),
  type: z.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
  value: z.union([z.string(), z.number(), z.boolean()])
});
// Boolean combination ("and"/"or") of filters. Recursive via z.lazy so
// compound filters can nest arbitrarily deep.
var compoundFilterSchema = z.object({
  type: z.enum(["and", "or"]),
  filters: z.array(
    z.union([comparisonFilterSchema, z.lazy(() => compoundFilterSchema)])
  )
});
// A filter is either a single comparison or a compound of filters.
var filtersSchema = z.union([comparisonFilterSchema, compoundFilterSchema]);
// Configuration accepted when declaring the `openai.file_search` tool.
var fileSearchArgsSchema = z.object({
  /**
   * List of vector store IDs to search through. If not provided, searches all available vector stores.
   */
  vectorStoreIds: z.array(z.string()).optional(),
  /**
   * Maximum number of search results to return. Defaults to 10.
   */
  maxNumResults: z.number().optional(),
  /**
   * Ranking options for the search.
   */
  ranking: z.object({
    ranker: z.enum(["auto", "default-2024-08-21"]).optional()
  }).optional(),
  /**
   * A filter to apply based on file attributes.
   */
  filters: filtersSchema.optional()
});
// Provider-defined tool: OpenAI hosted file search. The model supplies a
// `query` string; configuration comes from fileSearchArgsSchema above.
var fileSearch = createProviderDefinedToolFactory({
  id: "openai.file_search",
  name: "file_search",
  inputSchema: z.object({
    query: z.string()
  })
});
// Configuration accepted when declaring the `openai.web_search_preview` tool.
var webSearchPreviewArgsSchema = z.object({
  /**
   * Search context size to use for the web search.
   * - high: Most comprehensive context, highest cost, slower response
   * - medium: Balanced context, cost, and latency (default)
   * - low: Least context, lowest cost, fastest response
   */
  searchContextSize: z.enum(["low", "medium", "high"]).optional(),
  /**
   * User location information to provide geographically relevant search results.
   */
  userLocation: z.object({
    /**
     * Type of location (always 'approximate')
     */
    type: z.literal("approximate"),
    /**
     * Two-letter ISO country code (e.g., 'US', 'GB')
     */
    country: z.string().optional(),
    /**
     * City name (free text, e.g., 'Minneapolis')
     */
    city: z.string().optional(),
    /**
     * Region name (free text, e.g., 'Minnesota')
     */
    region: z.string().optional(),
    /**
     * IANA timezone (e.g., 'America/Chicago')
     */
    timezone: z.string().optional()
  }).optional()
});
// Provider-defined tool: OpenAI hosted web search preview. It takes no
// model-supplied input (empty input schema).
var webSearchPreview = createProviderDefinedToolFactory({
  id: "openai.web_search_preview",
  name: "web_search_preview",
  inputSchema: z.object({})
});
|
|
384
|
+
/**
 * Translates AI-SDK tool definitions and tool choice into the OpenAI Chat
 * Completions `tools` / `tool_choice` request fields.
 *
 * @param {object} options
 * @param {Array|undefined} options.tools - AI-SDK tool definitions.
 * @param {object|undefined} options.toolChoice - AI-SDK tool choice.
 * @param {boolean|undefined} options.structuredOutputs - Whether structured outputs are on.
 * @param {boolean|undefined} options.strictJsonSchema - Strict schema flag, forwarded per tool.
 * @returns {{ tools: (Array|undefined), toolChoice: (object|string|undefined), toolWarnings: Array }}
 * @throws {UnsupportedFunctionalityError} for an unrecognized tool-choice type.
 */
function prepareChatTools({
  tools,
  toolChoice,
  structuredOutputs,
  strictJsonSchema
}) {
  // A missing or empty tool list is treated as "no tools at all".
  const activeTools = tools != null && tools.length ? tools : void 0;
  const toolWarnings = [];
  if (activeTools == null) {
    return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
  const mappedTools = [];
  for (const tool of activeTools) {
    if (tool.type === "function") {
      mappedTools.push({
        type: "function",
        function: {
          name: tool.name,
          description: tool.description,
          parameters: tool.inputSchema,
          // `strict` is only sent when structured outputs are enabled.
          strict: structuredOutputs ? strictJsonSchema : void 0
        }
      });
    } else if (tool.type === "provider-defined" && tool.id === "openai.file_search") {
      const args = fileSearchArgsSchema.parse(tool.args);
      mappedTools.push({
        type: "file_search",
        vector_store_ids: args.vectorStoreIds,
        max_num_results: args.maxNumResults,
        ranking_options: args.ranking ? { ranker: args.ranking.ranker } : void 0,
        filters: args.filters
      });
    } else if (tool.type === "provider-defined" && tool.id === "openai.web_search_preview") {
      const args = webSearchPreviewArgsSchema.parse(tool.args);
      mappedTools.push({
        type: "web_search_preview",
        search_context_size: args.searchContextSize,
        user_location: args.userLocation
      });
    } else {
      // Unknown tool types and unknown provider-defined tool IDs are both
      // reported as warnings rather than thrown.
      toolWarnings.push({ type: "unsupported-tool", tool });
    }
  }
  if (toolChoice == null) {
    return { tools: mappedTools, toolChoice: void 0, toolWarnings };
  }
  const choiceType = toolChoice.type;
  // "auto" | "none" | "required" pass through verbatim.
  if (choiceType === "auto" || choiceType === "none" || choiceType === "required") {
    return { tools: mappedTools, toolChoice: choiceType, toolWarnings };
  }
  // Forcing a specific tool maps to OpenAI's function-choice object.
  if (choiceType === "tool") {
    return {
      tools: mappedTools,
      toolChoice: {
        type: "function",
        function: {
          name: toolChoice.toolName
        }
      },
      toolWarnings
    };
  }
  throw new UnsupportedFunctionalityError({
    functionality: `tool choice type: ${choiceType}`
  });
}
|
|
469
|
+
var OpenAIChatLanguageModel = class {
|
|
470
|
+
  /**
   * @param {string} modelId - OpenAI chat model identifier (e.g. "gpt-4o").
   * @param {object} config - Provider config (provider name, url builder,
   *   headers factory, fetch implementation).
   */
  constructor(modelId, config) {
    // Implements language-model specification v2.
    this.specificationVersion = "v2";
    // http(s) image URLs may be passed to the API directly without downloading.
    this.supportedUrls = {
      "image/*": [/^https?:\/\/.*$/]
    };
    this.modelId = modelId;
    this.config = config;
  }
|
|
478
|
+
  /** Provider name, taken from the injected config. */
  get provider() {
    return this.config.provider;
  }
|
|
481
|
+
  /**
   * Builds the Chat Completions request body from standardized SDK call
   * options plus OpenAI-specific provider options, collecting warnings for
   * settings the selected model does not support.
   *
   * @returns {{args: object, warnings: Array}} request body and warnings.
   */
  async getArgs({
    prompt,
    maxOutputTokens,
    temperature,
    topP,
    topK,
    frequencyPenalty,
    presencePenalty,
    stopSequences,
    responseFormat,
    seed,
    tools,
    toolChoice,
    providerOptions
  }) {
    var _a, _b, _c, _d;
    const warnings = [];
    // Parse `providerOptions.openai` against the provider schema; default {}.
    const openaiOptions = (_a = await parseProviderOptions({
      provider: "openai",
      providerOptions,
      schema: openaiProviderOptions
    })) != null ? _a : {};
    // Structured outputs default to enabled.
    const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
    if (topK != null) {
      warnings.push({
        type: "unsupported-setting",
        setting: "topK"
      });
    }
    if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
      warnings.push({
        type: "unsupported-setting",
        setting: "responseFormat",
        details: "JSON response format schema is only supported with structuredOutputs"
      });
    }
    const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
      {
        prompt,
        systemMessageMode: getSystemMessageMode(this.modelId)
      }
    );
    warnings.push(...messageWarnings);
    const strictJsonSchema = (_c = openaiOptions.strictJsonSchema) != null ? _c : false;
    const baseArgs = {
      // model id:
      model: this.modelId,
      // model specific settings:
      logit_bias: openaiOptions.logitBias,
      // `logprobs: true` is requested when logprobs is true or a number;
      // a numeric value additionally sets top_logprobs below.
      logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
      top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
      user: openaiOptions.user,
      parallel_tool_calls: openaiOptions.parallelToolCalls,
      // standardized settings:
      max_tokens: maxOutputTokens,
      temperature,
      top_p: topP,
      frequency_penalty: frequencyPenalty,
      presence_penalty: presencePenalty,
      // JSON mode: use json_schema when a schema is given and structured
      // outputs are enabled, otherwise plain json_object.
      response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? structuredOutputs && responseFormat.schema != null ? {
        type: "json_schema",
        json_schema: {
          schema: responseFormat.schema,
          strict: strictJsonSchema,
          name: (_d = responseFormat.name) != null ? _d : "response",
          description: responseFormat.description
        }
      } : { type: "json_object" } : void 0,
      stop: stopSequences,
      seed,
      verbosity: openaiOptions.textVerbosity,
      // openai specific settings:
      // TODO AI SDK 6: remove, we auto-map maxOutputTokens now
      max_completion_tokens: openaiOptions.maxCompletionTokens,
      store: openaiOptions.store,
      metadata: openaiOptions.metadata,
      prediction: openaiOptions.prediction,
      reasoning_effort: openaiOptions.reasoningEffort,
      service_tier: openaiOptions.serviceTier,
      prompt_cache_key: openaiOptions.promptCacheKey,
      safety_identifier: openaiOptions.safetyIdentifier,
      // messages:
      messages
    };
    // Reasoning models reject most sampling parameters; strip each set one
    // and emit a warning rather than failing the request.
    if (isReasoningModel(this.modelId)) {
      if (baseArgs.temperature != null) {
        baseArgs.temperature = void 0;
        warnings.push({
          type: "unsupported-setting",
          setting: "temperature",
          details: "temperature is not supported for reasoning models"
        });
      }
      if (baseArgs.top_p != null) {
        baseArgs.top_p = void 0;
        warnings.push({
          type: "unsupported-setting",
          setting: "topP",
          details: "topP is not supported for reasoning models"
        });
      }
      if (baseArgs.frequency_penalty != null) {
        baseArgs.frequency_penalty = void 0;
        warnings.push({
          type: "unsupported-setting",
          setting: "frequencyPenalty",
          details: "frequencyPenalty is not supported for reasoning models"
        });
      }
      if (baseArgs.presence_penalty != null) {
        baseArgs.presence_penalty = void 0;
        warnings.push({
          type: "unsupported-setting",
          setting: "presencePenalty",
          details: "presencePenalty is not supported for reasoning models"
        });
      }
      if (baseArgs.logit_bias != null) {
        baseArgs.logit_bias = void 0;
        warnings.push({
          type: "other",
          message: "logitBias is not supported for reasoning models"
        });
      }
      if (baseArgs.logprobs != null) {
        baseArgs.logprobs = void 0;
        warnings.push({
          type: "other",
          message: "logprobs is not supported for reasoning models"
        });
      }
      if (baseArgs.top_logprobs != null) {
        baseArgs.top_logprobs = void 0;
        warnings.push({
          type: "other",
          message: "topLogprobs is not supported for reasoning models"
        });
      }
      // Reasoning models take max_completion_tokens instead of max_tokens;
      // auto-migrate unless the caller already set it explicitly.
      if (baseArgs.max_tokens != null) {
        if (baseArgs.max_completion_tokens == null) {
          baseArgs.max_completion_tokens = baseArgs.max_tokens;
        }
        baseArgs.max_tokens = void 0;
      }
    } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
      if (baseArgs.temperature != null) {
        baseArgs.temperature = void 0;
        warnings.push({
          type: "unsupported-setting",
          setting: "temperature",
          details: "temperature is not supported for the search preview models and has been removed."
        });
      }
    }
    // Drop service tiers the model does not qualify for.
    if (openaiOptions.serviceTier === "flex" && !supportsFlexProcessing(this.modelId)) {
      warnings.push({
        type: "unsupported-setting",
        setting: "serviceTier",
        details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
      });
      baseArgs.service_tier = void 0;
    }
    if (openaiOptions.serviceTier === "priority" && !supportsPriorityProcessing(this.modelId)) {
      warnings.push({
        type: "unsupported-setting",
        setting: "serviceTier",
        details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
      });
      baseArgs.service_tier = void 0;
    }
    const {
      tools: openaiTools2,
      toolChoice: openaiToolChoice,
      toolWarnings
    } = prepareChatTools({
      tools,
      toolChoice,
      structuredOutputs,
      strictJsonSchema
    });
    return {
      args: {
        ...baseArgs,
        tools: openaiTools2,
        tool_choice: openaiToolChoice
      },
      warnings: [...warnings, ...toolWarnings]
    };
  }
|
|
670
|
+
  /**
   * Performs a non-streaming chat completion call and maps the first choice
   * into SDK content parts (text, tool calls, URL-citation sources), usage,
   * and provider metadata.
   */
  async doGenerate(options) {
    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
    const { args: body, warnings } = await this.getArgs(options);
    const {
      responseHeaders,
      value: response,
      rawValue: rawResponse
    } = await postJsonToApi({
      url: this.config.url({
        path: "/chat/completions",
        modelId: this.modelId
      }),
      headers: combineHeaders(this.config.headers(), options.headers),
      body,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: createJsonResponseHandler(
        openaiChatResponseSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    // Only the first choice is surfaced.
    const choice = response.choices[0];
    const content = [];
    const text = choice.message.content;
    if (text != null && text.length > 0) {
      content.push({ type: "text", text });
    }
    // Tool calls become tool-call content parts; missing ids get generated.
    for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
      content.push({
        type: "tool-call",
        toolCallId: (_b = toolCall.id) != null ? _b : generateId(),
        toolName: toolCall.function.name,
        input: toolCall.function.arguments
      });
    }
    // URL citation annotations become "source" content parts.
    for (const annotation of (_c = choice.message.annotations) != null ? _c : []) {
      content.push({
        type: "source",
        sourceType: "url",
        id: generateId(),
        url: annotation.url,
        title: annotation.title
      });
    }
    const completionTokenDetails = (_d = response.usage) == null ? void 0 : _d.completion_tokens_details;
    const promptTokenDetails = (_e = response.usage) == null ? void 0 : _e.prompt_tokens_details;
    // Provider metadata is only populated for fields actually present.
    const providerMetadata = { openai: {} };
    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
      providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
    }
    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
      providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
    }
    if (((_f = choice.logprobs) == null ? void 0 : _f.content) != null) {
      providerMetadata.openai.logprobs = choice.logprobs.content;
    }
    return {
      content,
      finishReason: mapOpenAIFinishReason(choice.finish_reason),
      usage: {
        inputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.prompt_tokens) != null ? _h : void 0,
        outputTokens: (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens) != null ? _j : void 0,
        totalTokens: (_l = (_k = response.usage) == null ? void 0 : _k.total_tokens) != null ? _l : void 0,
        reasoningTokens: (_m = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _m : void 0,
        cachedInputTokens: (_n = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _n : void 0
      },
      request: { body },
      response: {
        ...getResponseMetadata(response),
        headers: responseHeaders,
        body: rawResponse
      },
      warnings,
      providerMetadata
    };
  }
|
|
746
|
+
  /**
   * Performs a streaming chat completion call. The SSE chunk stream is piped
   * through a TransformStream that re-emits SDK stream parts (text deltas,
   * incremental tool-call input, sources) and accumulates usage/finish state
   * which is flushed as a final "finish" event.
   */
  async doStream(options) {
    const { args, warnings } = await this.getArgs(options);
    const body = {
      ...args,
      stream: true,
      // Request a final usage chunk from the API.
      stream_options: {
        include_usage: true
      }
    };
    const { responseHeaders, value: response } = await postJsonToApi({
      url: this.config.url({
        path: "/chat/completions",
        modelId: this.modelId
      }),
      headers: combineHeaders(this.config.headers(), options.headers),
      body,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: createEventSourceResponseHandler(
        openaiChatChunkSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    // Per-index accumulation of streamed tool calls.
    const toolCalls = [];
    let finishReason = "unknown";
    const usage = {
      inputTokens: void 0,
      outputTokens: void 0,
      totalTokens: void 0
    };
    let isFirstChunk = true;
    // Tracks whether a text-start event has been emitted (single text id "0").
    let isActiveText = false;
    const providerMetadata = { openai: {} };
    return {
      stream: response.pipeThrough(
        new TransformStream({
          start(controller) {
            controller.enqueue({ type: "stream-start", warnings });
          },
          transform(chunk, controller) {
            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
            if (options.includeRawChunks) {
              controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
            }
            // Parse failures and API error payloads become "error" parts.
            if (!chunk.success) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: chunk.error });
              return;
            }
            const value = chunk.value;
            if ("error" in value) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: value.error });
              return;
            }
            if (isFirstChunk) {
              isFirstChunk = false;
              controller.enqueue({
                type: "response-metadata",
                ...getResponseMetadata(value)
              });
            }
            // Usage arrives on the final chunk when include_usage is set.
            if (value.usage != null) {
              usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
              usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
              usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
              usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
              usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
              if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
                providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
              }
              if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
                providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
              }
            }
            const choice = value.choices[0];
            if ((choice == null ? void 0 : choice.finish_reason) != null) {
              finishReason = mapOpenAIFinishReason(choice.finish_reason);
            }
            if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
              providerMetadata.openai.logprobs = choice.logprobs.content;
            }
            if ((choice == null ? void 0 : choice.delta) == null) {
              return;
            }
            const delta = choice.delta;
            if (delta.content != null) {
              if (!isActiveText) {
                controller.enqueue({ type: "text-start", id: "0" });
                isActiveText = true;
              }
              controller.enqueue({
                type: "text-delta",
                id: "0",
                delta: delta.content
              });
            }
            if (delta.tool_calls != null) {
              for (const toolCallDelta of delta.tool_calls) {
                const index = toolCallDelta.index;
                // First delta for this index: must carry type, id and name.
                if (toolCalls[index] == null) {
                  if (toolCallDelta.type !== "function") {
                    throw new InvalidResponseDataError({
                      data: toolCallDelta,
                      message: `Expected 'function' type.`
                    });
                  }
                  if (toolCallDelta.id == null) {
                    throw new InvalidResponseDataError({
                      data: toolCallDelta,
                      message: `Expected 'id' to be a string.`
                    });
                  }
                  if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
                    throw new InvalidResponseDataError({
                      data: toolCallDelta,
                      message: `Expected 'function.name' to be a string.`
                    });
                  }
                  controller.enqueue({
                    type: "tool-input-start",
                    id: toolCallDelta.id,
                    toolName: toolCallDelta.function.name
                  });
                  toolCalls[index] = {
                    id: toolCallDelta.id,
                    type: "function",
                    function: {
                      name: toolCallDelta.function.name,
                      arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
                    },
                    hasFinished: false
                  };
                  const toolCall2 = toolCalls[index];
                  // Some providers send the whole call in one delta; emit
                  // the full input and close the call immediately if the
                  // arguments already parse as JSON.
                  if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
                    if (toolCall2.function.arguments.length > 0) {
                      controller.enqueue({
                        type: "tool-input-delta",
                        id: toolCall2.id,
                        delta: toolCall2.function.arguments
                      });
                    }
                    if (isParsableJson(toolCall2.function.arguments)) {
                      controller.enqueue({
                        type: "tool-input-end",
                        id: toolCall2.id
                      });
                      controller.enqueue({
                        type: "tool-call",
                        toolCallId: (_q = toolCall2.id) != null ? _q : generateId(),
                        toolName: toolCall2.function.name,
                        input: toolCall2.function.arguments
                      });
                      toolCall2.hasFinished = true;
                    }
                  }
                  continue;
                }
                // Continuation delta: append arguments until they form
                // complete, parsable JSON, then emit the finished tool call.
                const toolCall = toolCalls[index];
                if (toolCall.hasFinished) {
                  continue;
                }
                if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
                  toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
                }
                controller.enqueue({
                  type: "tool-input-delta",
                  id: toolCall.id,
                  delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
                });
                if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && isParsableJson(toolCall.function.arguments)) {
                  controller.enqueue({
                    type: "tool-input-end",
                    id: toolCall.id
                  });
                  controller.enqueue({
                    type: "tool-call",
                    toolCallId: (_x = toolCall.id) != null ? _x : generateId(),
                    toolName: toolCall.function.name,
                    input: toolCall.function.arguments
                  });
                  toolCall.hasFinished = true;
                }
              }
            }
            if (delta.annotations != null) {
              for (const annotation of delta.annotations) {
                controller.enqueue({
                  type: "source",
                  sourceType: "url",
                  id: generateId(),
                  url: annotation.url,
                  title: annotation.title
                });
              }
            }
          },
          flush(controller) {
            if (isActiveText) {
              controller.enqueue({ type: "text-end", id: "0" });
            }
            controller.enqueue({
              type: "finish",
              finishReason,
              usage,
              ...providerMetadata != null ? { providerMetadata } : {}
            });
          }
        })
      ),
      request: { body },
      response: { headers: responseHeaders }
    };
  }
|
|
960
|
+
};
|
|
961
|
+
// Schema for the `usage` object of a chat completion response/chunk.
// All fields are nullish-tolerant because usage may be absent or partial.
var openaiTokenUsageSchema = z.object({
  prompt_tokens: z.number().nullish(),
  completion_tokens: z.number().nullish(),
  total_tokens: z.number().nullish(),
  prompt_tokens_details: z.object({
    cached_tokens: z.number().nullish()
  }).nullish(),
  completion_tokens_details: z.object({
    reasoning_tokens: z.number().nullish(),
    accepted_prediction_tokens: z.number().nullish(),
    rejected_prediction_tokens: z.number().nullish()
  }).nullish()
}).nullish();
|
|
974
|
+
// Schema for a non-streaming /chat/completions response. Limited to the
// fields this provider actually reads; kept permissive (nullish) so that
// OpenAI-compatible servers with partial responses still validate.
var openaiChatResponseSchema = z.object({
  id: z.string().nullish(),
  created: z.number().nullish(),
  model: z.string().nullish(),
  choices: z.array(
    z.object({
      message: z.object({
        role: z.literal("assistant").nullish(),
        content: z.string().nullish(),
        tool_calls: z.array(
          z.object({
            id: z.string().nullish(),
            type: z.literal("function"),
            function: z.object({
              name: z.string(),
              arguments: z.string()
            })
          })
        ).nullish(),
        // URL citations produced by web search tools.
        annotations: z.array(
          z.object({
            type: z.literal("url_citation"),
            start_index: z.number(),
            end_index: z.number(),
            url: z.string(),
            title: z.string()
          })
        ).nullish()
      }),
      index: z.number(),
      logprobs: z.object({
        content: z.array(
          z.object({
            token: z.string(),
            logprob: z.number(),
            top_logprobs: z.array(
              z.object({
                token: z.string(),
                logprob: z.number()
              })
            )
          })
        ).nullish()
      }).nullish(),
      finish_reason: z.string().nullish()
    })
  ),
  usage: openaiTokenUsageSchema
});
|
|
1023
|
+
// Schema for a streaming /chat/completions SSE chunk. A chunk is either a
// regular delta payload or an error payload (union with the error schema).
var openaiChatChunkSchema = z.union([
  z.object({
    id: z.string().nullish(),
    created: z.number().nullish(),
    model: z.string().nullish(),
    choices: z.array(
      z.object({
        delta: z.object({
          role: z.enum(["assistant"]).nullish(),
          content: z.string().nullish(),
          // Incremental tool-call fragments, keyed by `index`.
          tool_calls: z.array(
            z.object({
              index: z.number(),
              id: z.string().nullish(),
              type: z.literal("function").nullish(),
              function: z.object({
                name: z.string().nullish(),
                arguments: z.string().nullish()
              })
            })
          ).nullish(),
          annotations: z.array(
            z.object({
              type: z.literal("url_citation"),
              start_index: z.number(),
              end_index: z.number(),
              url: z.string(),
              title: z.string()
            })
          ).nullish()
        }).nullish(),
        logprobs: z.object({
          content: z.array(
            z.object({
              token: z.string(),
              logprob: z.number(),
              top_logprobs: z.array(
                z.object({
                  token: z.string(),
                  logprob: z.number()
                })
              )
            })
          ).nullish()
        }).nullish(),
        finish_reason: z.string().nullish(),
        index: z.number()
      })
    ),
    usage: openaiTokenUsageSchema
  }),
  openaiErrorDataSchema
]);
|
|
1076
|
+
/**
 * Returns true when the model id denotes a reasoning model: any o-series
 * ("o...") or gpt-5 model, excluding the non-reasoning "gpt-5-chat" family.
 *
 * @param {string} modelId - OpenAI model identifier.
 * @returns {boolean}
 */
function isReasoningModel(modelId) {
  if (modelId.startsWith("gpt-5-chat")) {
    return false;
  }
  return modelId.startsWith("o") || modelId.startsWith("gpt-5");
}
|
|
1079
|
+
/**
 * Flex processing service tier availability: o3, o4-mini, and gpt-5 models
 * (excluding the gpt-5-chat family).
 *
 * @param {string} modelId - OpenAI model identifier.
 * @returns {boolean}
 */
function supportsFlexProcessing(modelId) {
  if (modelId.startsWith("o3") || modelId.startsWith("o4-mini")) {
    return true;
  }
  return modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
}
|
|
1082
|
+
/**
 * Priority processing service tier availability: gpt-4, gpt-5-mini, o3,
 * o4-mini, and gpt-5 models except the gpt-5-nano and gpt-5-chat families.
 *
 * @param {string} modelId - OpenAI model identifier.
 * @returns {boolean}
 */
function supportsPriorityProcessing(modelId) {
  const alwaysSupportedPrefixes = ["gpt-4", "gpt-5-mini", "o3", "o4-mini"];
  if (alwaysSupportedPrefixes.some((prefix) => modelId.startsWith(prefix))) {
    return true;
  }
  return modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat");
}
|
|
1085
|
+
/**
 * Determines how system messages should be handled for a model:
 * - non-reasoning models use regular "system" messages;
 * - reasoning models default to "developer" messages unless the
 *   reasoningModels table specifies otherwise (e.g. "remove").
 *
 * @param {string} modelId - OpenAI model identifier.
 * @returns {"system"|"developer"|"remove"}
 */
function getSystemMessageMode(modelId) {
  if (!isReasoningModel(modelId)) {
    return "system";
  }
  const entry = reasoningModels[modelId];
  const mode = entry == null ? void 0 : entry.systemMessageMode;
  return mode != null ? mode : "developer";
}
|
|
1092
|
+
// Per-model overrides for reasoning models. Models not listed here fall back
// to the "developer" system-message mode (see getSystemMessageMode).
// "remove" means system messages are stripped before sending.
var reasoningModels = {
  "o1-mini": {
    systemMessageMode: "remove"
  },
  "o1-mini-2024-09-12": {
    systemMessageMode: "remove"
  },
  "o1-preview": {
    systemMessageMode: "remove"
  },
  "o1-preview-2024-09-12": {
    systemMessageMode: "remove"
  },
  o3: {
    systemMessageMode: "developer"
  },
  "o3-2025-04-16": {
    systemMessageMode: "developer"
  },
  "o3-mini": {
    systemMessageMode: "developer"
  },
  "o3-mini-2025-01-31": {
    systemMessageMode: "developer"
  },
  "o4-mini": {
    systemMessageMode: "developer"
  },
  "o4-mini-2025-04-16": {
    systemMessageMode: "developer"
  }
};
|
|
1124
|
+
/**
 * Converts an SDK prompt into a plain-text prompt for the legacy OpenAI
 * completions API, using "user:"/"assistant:" turn labels.
 *
 * A leading system message becomes a plain preamble; any later system
 * message is invalid. Tool calls and tool messages are unsupported.
 *
 * Fix: the InvalidPromptError message previously used a double-quoted string
 * ("...${content}"), so the offending content was never interpolated; it is
 * now a template literal.
 *
 * @param {object} params
 * @param {Array} params.prompt - SDK prompt messages.
 * @param {string} [params.user="user"] - Label for user turns.
 * @param {string} [params.assistant="assistant"] - Label for assistant turns.
 * @returns {{prompt: string, stopSequences: string[]}} prompt text plus a
 *   stop sequence that halts generation at the next user turn.
 * @throws InvalidPromptError for a system message after the first position.
 * @throws UnsupportedFunctionalityError for tool calls / tool messages.
 */
function convertToOpenAICompletionPrompt({
  prompt,
  user = "user",
  assistant = "assistant"
}) {
  let text = "";
  // A system message is only allowed at the start; it becomes a preamble.
  if (prompt[0].role === "system") {
    text += `${prompt[0].content}

`;
    prompt = prompt.slice(1);
  }
  for (const { role, content } of prompt) {
    switch (role) {
      case "system": {
        throw new InvalidPromptError({
          message: `Unexpected system message in prompt: ${content}`,
          prompt
        });
      }
      case "user": {
        // Only text parts contribute; other part types are dropped.
        const userMessage = content.map((part) => {
          switch (part.type) {
            case "text": {
              return part.text;
            }
          }
        }).filter(Boolean).join("");
        text += `${user}:
${userMessage}

`;
        break;
      }
      case "assistant": {
        const assistantMessage = content.map((part) => {
          switch (part.type) {
            case "text": {
              return part.text;
            }
            case "tool-call": {
              throw new UnsupportedFunctionalityError({
                functionality: "tool-call messages"
              });
            }
          }
        }).join("");
        text += `${assistant}:
${assistantMessage}

`;
        break;
      }
      case "tool": {
        throw new UnsupportedFunctionalityError({
          functionality: "tool messages"
        });
      }
      default: {
        const _exhaustiveCheck = role;
        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
      }
    }
  }
  // Open the assistant turn so the model continues from here.
  text += `${assistant}:
`;
  return {
    prompt: text,
    // Stop before the model starts hallucinating the next user turn.
    stopSequences: [`
${user}:`]
  };
}
|
|
1196
|
+
/**
 * Extracts response metadata from a completions response: id, model id, and
 * the creation timestamp (Unix seconds converted to a Date). Absent fields
 * are returned as undefined.
 *
 * @param {{id?: string|null, model?: string|null, created?: number|null}} response
 * @returns {{id: string|undefined, modelId: string|undefined, timestamp: Date|undefined}}
 */
function getResponseMetadata2({ id, model, created }) {
  return {
    id: id ?? void 0,
    modelId: model ?? void 0,
    timestamp: created == null ? void 0 : new Date(created * 1e3)
  };
}
|
|
1207
|
+
/**
 * Maps an OpenAI completion finish reason to the SDK's normalized finish
 * reason. Unrecognized (or absent) values map to "unknown".
 *
 * @param {string|null|undefined} finishReason - Raw finish_reason value.
 * @returns {"stop"|"length"|"content-filter"|"tool-calls"|"unknown"}
 */
function mapOpenAIFinishReason2(finishReason) {
  // Map avoids prototype-key collisions a plain object lookup would have.
  const finishReasonMap = new Map([
    ["stop", "stop"],
    ["length", "length"],
    ["content_filter", "content-filter"],
    ["function_call", "tool-calls"],
    ["tool_calls", "tool-calls"]
  ]);
  return finishReasonMap.get(finishReason) ?? "unknown";
}
|
|
1222
|
+
// Provider options accepted under `providerOptions.openai` (and under the
// provider's own name) for the legacy completions API.
var openaiCompletionProviderOptions = z.object({
  /**
   * Echo back the prompt in addition to the completion.
   */
  echo: z.boolean().optional(),
  /**
   * Modify the likelihood of specified tokens appearing in the completion.
   *
   * Accepts a JSON object that maps tokens (specified by their token ID in
   * the GPT tokenizer) to an associated bias value from -100 to 100. You
   * can use this tokenizer tool to convert text to token IDs. Mathematically,
   * the bias is added to the logits generated by the model prior to sampling.
   * The exact effect will vary per model, but values between -1 and 1 should
   * decrease or increase likelihood of selection; values like -100 or 100
   * should result in a ban or exclusive selection of the relevant token.
   *
   * As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
   * token from being generated.
   */
  logitBias: z.record(z.string(), z.number()).optional(),
  /**
   * The suffix that comes after a completion of inserted text.
   */
  suffix: z.string().optional(),
  /**
   * A unique identifier representing your end-user, which can help OpenAI to
   * monitor and detect abuse. Learn more.
   */
  user: z.string().optional(),
  /**
   * Return the log probabilities of the tokens. Including logprobs will increase
   * the response size and can slow down response times. However, it can
   * be useful to better understand how the model is behaving.
   * Setting to true will return the log probabilities of the tokens that
   * were generated.
   * Setting to a number will return the log probabilities of the top n
   * tokens that were generated.
   */
  logprobs: z.union([z.boolean(), z.number()]).optional()
});
|
|
1262
|
+
var OpenAICompletionLanguageModel = class {
|
|
1263
|
+
constructor(modelId, config) {
|
|
1264
|
+
this.specificationVersion = "v2";
|
|
1265
|
+
this.supportedUrls = {
|
|
1266
|
+
// No URLs are supported for completion models.
|
|
1267
|
+
};
|
|
1268
|
+
this.modelId = modelId;
|
|
1269
|
+
this.config = config;
|
|
1270
|
+
}
|
|
1271
|
+
get providerOptionsName() {
|
|
1272
|
+
return this.config.provider.split(".")[0].trim();
|
|
1273
|
+
}
|
|
1274
|
+
get provider() {
|
|
1275
|
+
return this.config.provider;
|
|
1276
|
+
}
|
|
1277
|
+
async getArgs({
|
|
1278
|
+
prompt,
|
|
1279
|
+
maxOutputTokens,
|
|
1280
|
+
temperature,
|
|
1281
|
+
topP,
|
|
1282
|
+
topK,
|
|
1283
|
+
frequencyPenalty,
|
|
1284
|
+
presencePenalty,
|
|
1285
|
+
stopSequences: userStopSequences,
|
|
1286
|
+
responseFormat,
|
|
1287
|
+
tools,
|
|
1288
|
+
toolChoice,
|
|
1289
|
+
seed,
|
|
1290
|
+
providerOptions
|
|
1291
|
+
}) {
|
|
1292
|
+
const warnings = [];
|
|
1293
|
+
const openaiOptions = {
|
|
1294
|
+
...await parseProviderOptions({
|
|
1295
|
+
provider: "openai",
|
|
1296
|
+
providerOptions,
|
|
1297
|
+
schema: openaiCompletionProviderOptions
|
|
1298
|
+
}),
|
|
1299
|
+
...await parseProviderOptions({
|
|
1300
|
+
provider: this.providerOptionsName,
|
|
1301
|
+
providerOptions,
|
|
1302
|
+
schema: openaiCompletionProviderOptions
|
|
1303
|
+
})
|
|
1304
|
+
};
|
|
1305
|
+
if (topK != null) {
|
|
1306
|
+
warnings.push({ type: "unsupported-setting", setting: "topK" });
|
|
1307
|
+
}
|
|
1308
|
+
if (tools == null ? void 0 : tools.length) {
|
|
1309
|
+
warnings.push({ type: "unsupported-setting", setting: "tools" });
|
|
1310
|
+
}
|
|
1311
|
+
if (toolChoice != null) {
|
|
1312
|
+
warnings.push({ type: "unsupported-setting", setting: "toolChoice" });
|
|
1313
|
+
}
|
|
1314
|
+
if (responseFormat != null && responseFormat.type !== "text") {
|
|
1315
|
+
warnings.push({
|
|
1316
|
+
type: "unsupported-setting",
|
|
1317
|
+
setting: "responseFormat",
|
|
1318
|
+
details: "JSON response format is not supported."
|
|
1319
|
+
});
|
|
1320
|
+
}
|
|
1321
|
+
const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt });
|
|
1322
|
+
const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
|
|
1323
|
+
return {
|
|
1324
|
+
args: {
|
|
1325
|
+
// model id:
|
|
1326
|
+
model: this.modelId,
|
|
1327
|
+
// model specific settings:
|
|
1328
|
+
echo: openaiOptions.echo,
|
|
1329
|
+
logit_bias: openaiOptions.logitBias,
|
|
1330
|
+
logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
|
|
1331
|
+
suffix: openaiOptions.suffix,
|
|
1332
|
+
user: openaiOptions.user,
|
|
1333
|
+
// standardized settings:
|
|
1334
|
+
max_tokens: maxOutputTokens,
|
|
1335
|
+
temperature,
|
|
1336
|
+
top_p: topP,
|
|
1337
|
+
frequency_penalty: frequencyPenalty,
|
|
1338
|
+
presence_penalty: presencePenalty,
|
|
1339
|
+
seed,
|
|
1340
|
+
// prompt:
|
|
1341
|
+
prompt: completionPrompt,
|
|
1342
|
+
// stop sequences:
|
|
1343
|
+
stop: stop.length > 0 ? stop : void 0
|
|
1344
|
+
},
|
|
1345
|
+
warnings
|
|
1346
|
+
};
|
|
1347
|
+
}
|
|
1348
|
+
async doGenerate(options) {
|
|
1349
|
+
var _a, _b, _c;
|
|
1350
|
+
const { args, warnings } = await this.getArgs(options);
|
|
1351
|
+
const {
|
|
1352
|
+
responseHeaders,
|
|
1353
|
+
value: response,
|
|
1354
|
+
rawValue: rawResponse
|
|
1355
|
+
} = await postJsonToApi({
|
|
1356
|
+
url: this.config.url({
|
|
1357
|
+
path: "/completions",
|
|
1358
|
+
modelId: this.modelId
|
|
1359
|
+
}),
|
|
1360
|
+
headers: combineHeaders(this.config.headers(), options.headers),
|
|
1361
|
+
body: args,
|
|
1362
|
+
failedResponseHandler: openaiFailedResponseHandler,
|
|
1363
|
+
successfulResponseHandler: createJsonResponseHandler(
|
|
1364
|
+
openaiCompletionResponseSchema
|
|
1365
|
+
),
|
|
1366
|
+
abortSignal: options.abortSignal,
|
|
1367
|
+
fetch: this.config.fetch
|
|
1368
|
+
});
|
|
1369
|
+
const choice = response.choices[0];
|
|
1370
|
+
const providerMetadata = { openai: {} };
|
|
1371
|
+
if (choice.logprobs != null) {
|
|
1372
|
+
providerMetadata.openai.logprobs = choice.logprobs;
|
|
1373
|
+
}
|
|
1374
|
+
return {
|
|
1375
|
+
content: [{ type: "text", text: choice.text }],
|
|
1376
|
+
usage: {
|
|
1377
|
+
inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
|
|
1378
|
+
outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
|
|
1379
|
+
totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
|
|
1380
|
+
},
|
|
1381
|
+
finishReason: mapOpenAIFinishReason2(choice.finish_reason),
|
|
1382
|
+
request: { body: args },
|
|
1383
|
+
response: {
|
|
1384
|
+
...getResponseMetadata2(response),
|
|
1385
|
+
headers: responseHeaders,
|
|
1386
|
+
body: rawResponse
|
|
1387
|
+
},
|
|
1388
|
+
providerMetadata,
|
|
1389
|
+
warnings
|
|
1390
|
+
};
|
|
1391
|
+
}
|
|
1392
|
+
async doStream(options) {
|
|
1393
|
+
const { args, warnings } = await this.getArgs(options);
|
|
1394
|
+
const body = {
|
|
1395
|
+
...args,
|
|
1396
|
+
stream: true,
|
|
1397
|
+
stream_options: {
|
|
1398
|
+
include_usage: true
|
|
1399
|
+
}
|
|
1400
|
+
};
|
|
1401
|
+
const { responseHeaders, value: response } = await postJsonToApi({
|
|
1402
|
+
url: this.config.url({
|
|
1403
|
+
path: "/completions",
|
|
1404
|
+
modelId: this.modelId
|
|
1405
|
+
}),
|
|
1406
|
+
headers: combineHeaders(this.config.headers(), options.headers),
|
|
1407
|
+
body,
|
|
1408
|
+
failedResponseHandler: openaiFailedResponseHandler,
|
|
1409
|
+
successfulResponseHandler: createEventSourceResponseHandler(
|
|
1410
|
+
openaiCompletionChunkSchema
|
|
1411
|
+
),
|
|
1412
|
+
abortSignal: options.abortSignal,
|
|
1413
|
+
fetch: this.config.fetch
|
|
1414
|
+
});
|
|
1415
|
+
let finishReason = "unknown";
|
|
1416
|
+
const providerMetadata = { openai: {} };
|
|
1417
|
+
const usage = {
|
|
1418
|
+
inputTokens: void 0,
|
|
1419
|
+
outputTokens: void 0,
|
|
1420
|
+
totalTokens: void 0
|
|
1421
|
+
};
|
|
1422
|
+
let isFirstChunk = true;
|
|
1423
|
+
return {
|
|
1424
|
+
stream: response.pipeThrough(
|
|
1425
|
+
new TransformStream({
|
|
1426
|
+
start(controller) {
|
|
1427
|
+
controller.enqueue({ type: "stream-start", warnings });
|
|
1428
|
+
},
|
|
1429
|
+
transform(chunk, controller) {
|
|
1430
|
+
if (options.includeRawChunks) {
|
|
1431
|
+
controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
|
|
1432
|
+
}
|
|
1433
|
+
if (!chunk.success) {
|
|
1434
|
+
finishReason = "error";
|
|
1435
|
+
controller.enqueue({ type: "error", error: chunk.error });
|
|
1436
|
+
return;
|
|
1437
|
+
}
|
|
1438
|
+
const value = chunk.value;
|
|
1439
|
+
if ("error" in value) {
|
|
1440
|
+
finishReason = "error";
|
|
1441
|
+
controller.enqueue({ type: "error", error: value.error });
|
|
1442
|
+
return;
|
|
1443
|
+
}
|
|
1444
|
+
if (isFirstChunk) {
|
|
1445
|
+
isFirstChunk = false;
|
|
1446
|
+
controller.enqueue({
|
|
1447
|
+
type: "response-metadata",
|
|
1448
|
+
...getResponseMetadata2(value)
|
|
1449
|
+
});
|
|
1450
|
+
controller.enqueue({ type: "text-start", id: "0" });
|
|
1451
|
+
}
|
|
1452
|
+
if (value.usage != null) {
|
|
1453
|
+
usage.inputTokens = value.usage.prompt_tokens;
|
|
1454
|
+
usage.outputTokens = value.usage.completion_tokens;
|
|
1455
|
+
usage.totalTokens = value.usage.total_tokens;
|
|
1456
|
+
}
|
|
1457
|
+
const choice = value.choices[0];
|
|
1458
|
+
if ((choice == null ? void 0 : choice.finish_reason) != null) {
|
|
1459
|
+
finishReason = mapOpenAIFinishReason2(choice.finish_reason);
|
|
1460
|
+
}
|
|
1461
|
+
if ((choice == null ? void 0 : choice.logprobs) != null) {
|
|
1462
|
+
providerMetadata.openai.logprobs = choice.logprobs;
|
|
1463
|
+
}
|
|
1464
|
+
if ((choice == null ? void 0 : choice.text) != null && choice.text.length > 0) {
|
|
1465
|
+
controller.enqueue({
|
|
1466
|
+
type: "text-delta",
|
|
1467
|
+
id: "0",
|
|
1468
|
+
delta: choice.text
|
|
1469
|
+
});
|
|
1470
|
+
}
|
|
1471
|
+
},
|
|
1472
|
+
flush(controller) {
|
|
1473
|
+
if (!isFirstChunk) {
|
|
1474
|
+
controller.enqueue({ type: "text-end", id: "0" });
|
|
1475
|
+
}
|
|
1476
|
+
controller.enqueue({
|
|
1477
|
+
type: "finish",
|
|
1478
|
+
finishReason,
|
|
1479
|
+
providerMetadata,
|
|
1480
|
+
usage
|
|
1481
|
+
});
|
|
1482
|
+
}
|
|
1483
|
+
})
|
|
1484
|
+
),
|
|
1485
|
+
request: { body },
|
|
1486
|
+
response: { headers: responseHeaders }
|
|
1487
|
+
};
|
|
1488
|
+
}
|
|
1489
|
+
};
|
|
1490
|
+
var usageSchema = z.object({
|
|
1491
|
+
prompt_tokens: z.number(),
|
|
1492
|
+
completion_tokens: z.number(),
|
|
1493
|
+
total_tokens: z.number()
|
|
1494
|
+
});
|
|
1495
|
+
var openaiCompletionResponseSchema = z.object({
|
|
1496
|
+
id: z.string().nullish(),
|
|
1497
|
+
created: z.number().nullish(),
|
|
1498
|
+
model: z.string().nullish(),
|
|
1499
|
+
choices: z.array(
|
|
1500
|
+
z.object({
|
|
1501
|
+
text: z.string(),
|
|
1502
|
+
finish_reason: z.string(),
|
|
1503
|
+
logprobs: z.object({
|
|
1504
|
+
tokens: z.array(z.string()),
|
|
1505
|
+
token_logprobs: z.array(z.number()),
|
|
1506
|
+
top_logprobs: z.array(z.record(z.string(), z.number())).nullish()
|
|
1507
|
+
}).nullish()
|
|
1508
|
+
})
|
|
1509
|
+
),
|
|
1510
|
+
usage: usageSchema.nullish()
|
|
1511
|
+
});
|
|
1512
|
+
var openaiCompletionChunkSchema = z.union([
|
|
1513
|
+
z.object({
|
|
1514
|
+
id: z.string().nullish(),
|
|
1515
|
+
created: z.number().nullish(),
|
|
1516
|
+
model: z.string().nullish(),
|
|
1517
|
+
choices: z.array(
|
|
1518
|
+
z.object({
|
|
1519
|
+
text: z.string(),
|
|
1520
|
+
finish_reason: z.string().nullish(),
|
|
1521
|
+
index: z.number(),
|
|
1522
|
+
logprobs: z.object({
|
|
1523
|
+
tokens: z.array(z.string()),
|
|
1524
|
+
token_logprobs: z.array(z.number()),
|
|
1525
|
+
top_logprobs: z.array(z.record(z.string(), z.number())).nullish()
|
|
1526
|
+
}).nullish()
|
|
1527
|
+
})
|
|
1528
|
+
),
|
|
1529
|
+
usage: usageSchema.nullish()
|
|
1530
|
+
}),
|
|
1531
|
+
openaiErrorDataSchema
|
|
1532
|
+
]);
|
|
1533
|
+
var openaiEmbeddingProviderOptions = z.object({
|
|
1534
|
+
/**
|
|
1535
|
+
The number of dimensions the resulting output embeddings should have.
|
|
1536
|
+
Only supported in text-embedding-3 and later models.
|
|
1537
|
+
*/
|
|
1538
|
+
dimensions: z.number().optional(),
|
|
1539
|
+
/**
|
|
1540
|
+
A unique identifier representing your end-user, which can help OpenAI to
|
|
1541
|
+
monitor and detect abuse. Learn more.
|
|
1542
|
+
*/
|
|
1543
|
+
user: z.string().optional()
|
|
1544
|
+
});
|
|
1545
|
+
var OpenAIEmbeddingModel = class {
|
|
1546
|
+
constructor(modelId, config) {
|
|
1547
|
+
this.specificationVersion = "v2";
|
|
1548
|
+
this.maxEmbeddingsPerCall = 2048;
|
|
1549
|
+
this.supportsParallelCalls = true;
|
|
1550
|
+
this.modelId = modelId;
|
|
1551
|
+
this.config = config;
|
|
1552
|
+
}
|
|
1553
|
+
get provider() {
|
|
1554
|
+
return this.config.provider;
|
|
1555
|
+
}
|
|
1556
|
+
async doEmbed({
|
|
1557
|
+
values,
|
|
1558
|
+
headers,
|
|
1559
|
+
abortSignal,
|
|
1560
|
+
providerOptions
|
|
1561
|
+
}) {
|
|
1562
|
+
var _a;
|
|
1563
|
+
if (values.length > this.maxEmbeddingsPerCall) {
|
|
1564
|
+
throw new TooManyEmbeddingValuesForCallError({
|
|
1565
|
+
provider: this.provider,
|
|
1566
|
+
modelId: this.modelId,
|
|
1567
|
+
maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
|
|
1568
|
+
values
|
|
1569
|
+
});
|
|
1570
|
+
}
|
|
1571
|
+
const openaiOptions = (_a = await parseProviderOptions({
|
|
1572
|
+
provider: "openai",
|
|
1573
|
+
providerOptions,
|
|
1574
|
+
schema: openaiEmbeddingProviderOptions
|
|
1575
|
+
})) != null ? _a : {};
|
|
1576
|
+
const {
|
|
1577
|
+
responseHeaders,
|
|
1578
|
+
value: response,
|
|
1579
|
+
rawValue
|
|
1580
|
+
} = await postJsonToApi({
|
|
1581
|
+
url: this.config.url({
|
|
1582
|
+
path: "/embeddings",
|
|
1583
|
+
modelId: this.modelId
|
|
1584
|
+
}),
|
|
1585
|
+
headers: combineHeaders(this.config.headers(), headers),
|
|
1586
|
+
body: {
|
|
1587
|
+
model: this.modelId,
|
|
1588
|
+
input: values,
|
|
1589
|
+
encoding_format: "float",
|
|
1590
|
+
dimensions: openaiOptions.dimensions,
|
|
1591
|
+
user: openaiOptions.user
|
|
1592
|
+
},
|
|
1593
|
+
failedResponseHandler: openaiFailedResponseHandler,
|
|
1594
|
+
successfulResponseHandler: createJsonResponseHandler(
|
|
1595
|
+
openaiTextEmbeddingResponseSchema
|
|
1596
|
+
),
|
|
1597
|
+
abortSignal,
|
|
1598
|
+
fetch: this.config.fetch
|
|
1599
|
+
});
|
|
1600
|
+
return {
|
|
1601
|
+
embeddings: response.data.map((item) => item.embedding),
|
|
1602
|
+
usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
|
|
1603
|
+
response: { headers: responseHeaders, body: rawValue }
|
|
1604
|
+
};
|
|
1605
|
+
}
|
|
1606
|
+
};
|
|
1607
|
+
var openaiTextEmbeddingResponseSchema = z.object({
|
|
1608
|
+
data: z.array(z.object({ embedding: z.array(z.number()) })),
|
|
1609
|
+
usage: z.object({ prompt_tokens: z.number() }).nullish()
|
|
1610
|
+
});
|
|
1611
|
+
var modelMaxImagesPerCall = {
|
|
1612
|
+
"dall-e-3": 1,
|
|
1613
|
+
"dall-e-2": 10,
|
|
1614
|
+
"gpt-image-1": 10
|
|
1615
|
+
};
|
|
1616
|
+
var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
|
|
1617
|
+
var OpenAIImageModel = class {
|
|
1618
|
+
constructor(modelId, config) {
|
|
1619
|
+
this.modelId = modelId;
|
|
1620
|
+
this.config = config;
|
|
1621
|
+
this.specificationVersion = "v2";
|
|
1622
|
+
}
|
|
1623
|
+
get maxImagesPerCall() {
|
|
1624
|
+
var _a;
|
|
1625
|
+
return (_a = modelMaxImagesPerCall[this.modelId]) != null ? _a : 1;
|
|
1626
|
+
}
|
|
1627
|
+
get provider() {
|
|
1628
|
+
return this.config.provider;
|
|
1629
|
+
}
|
|
1630
|
+
async doGenerate({
|
|
1631
|
+
prompt,
|
|
1632
|
+
n,
|
|
1633
|
+
size,
|
|
1634
|
+
aspectRatio,
|
|
1635
|
+
seed,
|
|
1636
|
+
providerOptions,
|
|
1637
|
+
headers,
|
|
1638
|
+
abortSignal
|
|
1639
|
+
}) {
|
|
1640
|
+
var _a, _b, _c, _d;
|
|
1641
|
+
const warnings = [];
|
|
1642
|
+
if (aspectRatio != null) {
|
|
1643
|
+
warnings.push({
|
|
1644
|
+
type: "unsupported-setting",
|
|
1645
|
+
setting: "aspectRatio",
|
|
1646
|
+
details: "This model does not support aspect ratio. Use `size` instead."
|
|
1647
|
+
});
|
|
1648
|
+
}
|
|
1649
|
+
if (seed != null) {
|
|
1650
|
+
warnings.push({ type: "unsupported-setting", setting: "seed" });
|
|
1651
|
+
}
|
|
1652
|
+
const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
|
|
1653
|
+
const { value: response, responseHeaders } = await postJsonToApi({
|
|
1654
|
+
url: this.config.url({
|
|
1655
|
+
path: "/images/generations",
|
|
1656
|
+
modelId: this.modelId
|
|
1657
|
+
}),
|
|
1658
|
+
headers: combineHeaders(this.config.headers(), headers),
|
|
1659
|
+
body: {
|
|
1660
|
+
model: this.modelId,
|
|
1661
|
+
prompt,
|
|
1662
|
+
n,
|
|
1663
|
+
size,
|
|
1664
|
+
...(_d = providerOptions.openai) != null ? _d : {},
|
|
1665
|
+
...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
|
|
1666
|
+
},
|
|
1667
|
+
failedResponseHandler: openaiFailedResponseHandler,
|
|
1668
|
+
successfulResponseHandler: createJsonResponseHandler(
|
|
1669
|
+
openaiImageResponseSchema
|
|
1670
|
+
),
|
|
1671
|
+
abortSignal,
|
|
1672
|
+
fetch: this.config.fetch
|
|
1673
|
+
});
|
|
1674
|
+
return {
|
|
1675
|
+
images: response.data.map((item) => item.b64_json),
|
|
1676
|
+
warnings,
|
|
1677
|
+
response: {
|
|
1678
|
+
timestamp: currentDate,
|
|
1679
|
+
modelId: this.modelId,
|
|
1680
|
+
headers: responseHeaders
|
|
1681
|
+
},
|
|
1682
|
+
providerMetadata: {
|
|
1683
|
+
openai: {
|
|
1684
|
+
images: response.data.map(
|
|
1685
|
+
(item) => item.revised_prompt ? {
|
|
1686
|
+
revisedPrompt: item.revised_prompt
|
|
1687
|
+
} : null
|
|
1688
|
+
)
|
|
1689
|
+
}
|
|
1690
|
+
}
|
|
1691
|
+
};
|
|
1692
|
+
}
|
|
1693
|
+
};
|
|
1694
|
+
var openaiImageResponseSchema = z.object({
|
|
1695
|
+
data: z.array(
|
|
1696
|
+
z.object({ b64_json: z.string(), revised_prompt: z.string().optional() })
|
|
1697
|
+
)
|
|
1698
|
+
});
|
|
1699
|
+
var codeInterpreterArgsSchema = z.object({
|
|
1700
|
+
container: z.union([
|
|
1701
|
+
z.string(),
|
|
1702
|
+
z.object({
|
|
1703
|
+
fileIds: z.array(z.string()).optional()
|
|
1704
|
+
})
|
|
1705
|
+
]).optional()
|
|
1706
|
+
});
|
|
1707
|
+
var codeInterpreter = createProviderDefinedToolFactory({
|
|
1708
|
+
id: "openai.code_interpreter",
|
|
1709
|
+
name: "code_interpreter",
|
|
1710
|
+
inputSchema: z.object({})
|
|
1711
|
+
});
|
|
1712
|
+
var openaiTools = {
|
|
1713
|
+
codeInterpreter,
|
|
1714
|
+
fileSearch,
|
|
1715
|
+
webSearchPreview
|
|
1716
|
+
};
|
|
1717
|
+
function isFileId(data, prefixes) {
|
|
1718
|
+
if (!prefixes) return false;
|
|
1719
|
+
return prefixes.some((prefix) => data.startsWith(prefix));
|
|
1720
|
+
}
|
|
1721
|
+
async function convertToOpenAIResponsesMessages({
|
|
1722
|
+
prompt,
|
|
1723
|
+
systemMessageMode,
|
|
1724
|
+
fileIdPrefixes
|
|
1725
|
+
}) {
|
|
1726
|
+
var _a, _b, _c, _d, _e, _f;
|
|
1727
|
+
const messages = [];
|
|
1728
|
+
const warnings = [];
|
|
1729
|
+
for (const { role, content } of prompt) {
|
|
1730
|
+
switch (role) {
|
|
1731
|
+
case "system": {
|
|
1732
|
+
switch (systemMessageMode) {
|
|
1733
|
+
case "system": {
|
|
1734
|
+
messages.push({ role: "system", content });
|
|
1735
|
+
break;
|
|
1736
|
+
}
|
|
1737
|
+
case "developer": {
|
|
1738
|
+
messages.push({ role: "developer", content });
|
|
1739
|
+
break;
|
|
1740
|
+
}
|
|
1741
|
+
case "remove": {
|
|
1742
|
+
warnings.push({
|
|
1743
|
+
type: "other",
|
|
1744
|
+
message: "system messages are removed for this model"
|
|
1745
|
+
});
|
|
1746
|
+
break;
|
|
1747
|
+
}
|
|
1748
|
+
default: {
|
|
1749
|
+
const _exhaustiveCheck = systemMessageMode;
|
|
1750
|
+
throw new Error(
|
|
1751
|
+
`Unsupported system message mode: ${_exhaustiveCheck}`
|
|
1752
|
+
);
|
|
1753
|
+
}
|
|
1754
|
+
}
|
|
1755
|
+
break;
|
|
1756
|
+
}
|
|
1757
|
+
case "user": {
|
|
1758
|
+
messages.push({
|
|
1759
|
+
role: "user",
|
|
1760
|
+
content: content.map((part, index) => {
|
|
1761
|
+
var _a2, _b2, _c2;
|
|
1762
|
+
switch (part.type) {
|
|
1763
|
+
case "text": {
|
|
1764
|
+
return { type: "input_text", text: part.text };
|
|
1765
|
+
}
|
|
1766
|
+
case "file": {
|
|
1767
|
+
if (part.mediaType.startsWith("image/")) {
|
|
1768
|
+
const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
|
|
1769
|
+
return {
|
|
1770
|
+
type: "input_image",
|
|
1771
|
+
...part.data instanceof URL ? { image_url: part.data.toString() } : typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
|
|
1772
|
+
image_url: `data:${mediaType};base64,${convertToBase64(part.data)}`
|
|
1773
|
+
},
|
|
1774
|
+
detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
|
|
1775
|
+
};
|
|
1776
|
+
} else if (part.mediaType === "application/pdf") {
|
|
1777
|
+
if (part.data instanceof URL) {
|
|
1778
|
+
throw new UnsupportedFunctionalityError({
|
|
1779
|
+
functionality: "PDF file parts with URLs"
|
|
1780
|
+
});
|
|
1781
|
+
}
|
|
1782
|
+
return {
|
|
1783
|
+
type: "input_file",
|
|
1784
|
+
...typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
|
|
1785
|
+
filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
|
|
1786
|
+
file_data: `data:application/pdf;base64,${convertToBase64(part.data)}`
|
|
1787
|
+
}
|
|
1788
|
+
};
|
|
1789
|
+
} else {
|
|
1790
|
+
throw new UnsupportedFunctionalityError({
|
|
1791
|
+
functionality: `file part media type ${part.mediaType}`
|
|
1792
|
+
});
|
|
1793
|
+
}
|
|
1794
|
+
}
|
|
1795
|
+
}
|
|
1796
|
+
})
|
|
1797
|
+
});
|
|
1798
|
+
break;
|
|
1799
|
+
}
|
|
1800
|
+
case "assistant": {
|
|
1801
|
+
const reasoningMessages = {};
|
|
1802
|
+
for (const part of content) {
|
|
1803
|
+
switch (part.type) {
|
|
1804
|
+
case "text": {
|
|
1805
|
+
messages.push({
|
|
1806
|
+
role: "assistant",
|
|
1807
|
+
content: [{ type: "output_text", text: part.text }],
|
|
1808
|
+
id: (_c = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId) != null ? _c : void 0
|
|
1809
|
+
});
|
|
1810
|
+
break;
|
|
1811
|
+
}
|
|
1812
|
+
case "tool-call": {
|
|
1813
|
+
if (part.providerExecuted) {
|
|
1814
|
+
break;
|
|
1815
|
+
}
|
|
1816
|
+
messages.push({
|
|
1817
|
+
type: "function_call",
|
|
1818
|
+
call_id: part.toolCallId,
|
|
1819
|
+
name: part.toolName,
|
|
1820
|
+
arguments: JSON.stringify(part.input),
|
|
1821
|
+
id: (_f = (_e = (_d = part.providerOptions) == null ? void 0 : _d.openai) == null ? void 0 : _e.itemId) != null ? _f : void 0
|
|
1822
|
+
});
|
|
1823
|
+
break;
|
|
1824
|
+
}
|
|
1825
|
+
case "tool-result": {
|
|
1826
|
+
warnings.push({
|
|
1827
|
+
type: "other",
|
|
1828
|
+
message: `tool result parts in assistant messages are not supported for OpenAI responses`
|
|
1829
|
+
});
|
|
1830
|
+
break;
|
|
1831
|
+
}
|
|
1832
|
+
case "reasoning": {
|
|
1833
|
+
const providerOptions = await parseProviderOptions({
|
|
1834
|
+
provider: "openai",
|
|
1835
|
+
providerOptions: part.providerOptions,
|
|
1836
|
+
schema: openaiResponsesReasoningProviderOptionsSchema
|
|
1837
|
+
});
|
|
1838
|
+
const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
|
|
1839
|
+
if (reasoningId != null) {
|
|
1840
|
+
const existingReasoningMessage = reasoningMessages[reasoningId];
|
|
1841
|
+
const summaryParts = [];
|
|
1842
|
+
if (part.text.length > 0) {
|
|
1843
|
+
summaryParts.push({ type: "summary_text", text: part.text });
|
|
1844
|
+
} else if (existingReasoningMessage !== void 0) {
|
|
1845
|
+
warnings.push({
|
|
1846
|
+
type: "other",
|
|
1847
|
+
message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
|
|
1848
|
+
});
|
|
1849
|
+
}
|
|
1850
|
+
if (existingReasoningMessage === void 0) {
|
|
1851
|
+
reasoningMessages[reasoningId] = {
|
|
1852
|
+
type: "reasoning",
|
|
1853
|
+
id: reasoningId,
|
|
1854
|
+
encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
|
|
1855
|
+
summary: summaryParts
|
|
1856
|
+
};
|
|
1857
|
+
messages.push(reasoningMessages[reasoningId]);
|
|
1858
|
+
} else {
|
|
1859
|
+
existingReasoningMessage.summary.push(...summaryParts);
|
|
1860
|
+
}
|
|
1861
|
+
} else {
|
|
1862
|
+
warnings.push({
|
|
1863
|
+
type: "other",
|
|
1864
|
+
message: `Non-OpenAI reasoning parts are not supported. Skipping reasoning part: ${JSON.stringify(part)}.`
|
|
1865
|
+
});
|
|
1866
|
+
}
|
|
1867
|
+
break;
|
|
1868
|
+
}
|
|
1869
|
+
}
|
|
1870
|
+
}
|
|
1871
|
+
break;
|
|
1872
|
+
}
|
|
1873
|
+
case "tool": {
|
|
1874
|
+
for (const part of content) {
|
|
1875
|
+
const output = part.output;
|
|
1876
|
+
let contentValue;
|
|
1877
|
+
switch (output.type) {
|
|
1878
|
+
case "text":
|
|
1879
|
+
case "error-text":
|
|
1880
|
+
contentValue = output.value;
|
|
1881
|
+
break;
|
|
1882
|
+
case "content":
|
|
1883
|
+
case "json":
|
|
1884
|
+
case "error-json":
|
|
1885
|
+
contentValue = JSON.stringify(output.value);
|
|
1886
|
+
break;
|
|
1887
|
+
}
|
|
1888
|
+
messages.push({
|
|
1889
|
+
type: "function_call_output",
|
|
1890
|
+
call_id: part.toolCallId,
|
|
1891
|
+
output: contentValue
|
|
1892
|
+
});
|
|
1893
|
+
}
|
|
1894
|
+
break;
|
|
1895
|
+
}
|
|
1896
|
+
default: {
|
|
1897
|
+
const _exhaustiveCheck = role;
|
|
1898
|
+
throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
|
|
1899
|
+
}
|
|
1900
|
+
}
|
|
1901
|
+
}
|
|
1902
|
+
return { messages, warnings };
|
|
1903
|
+
}
|
|
1904
|
+
var openaiResponsesReasoningProviderOptionsSchema = z.object({
|
|
1905
|
+
itemId: z.string().nullish(),
|
|
1906
|
+
reasoningEncryptedContent: z.string().nullish()
|
|
1907
|
+
});
|
|
1908
|
+
function mapOpenAIResponseFinishReason({
|
|
1909
|
+
finishReason,
|
|
1910
|
+
hasToolCalls
|
|
1911
|
+
}) {
|
|
1912
|
+
switch (finishReason) {
|
|
1913
|
+
case void 0:
|
|
1914
|
+
case null:
|
|
1915
|
+
return hasToolCalls ? "tool-calls" : "stop";
|
|
1916
|
+
case "max_output_tokens":
|
|
1917
|
+
return "length";
|
|
1918
|
+
case "content_filter":
|
|
1919
|
+
return "content-filter";
|
|
1920
|
+
default:
|
|
1921
|
+
return hasToolCalls ? "tool-calls" : "unknown";
|
|
1922
|
+
}
|
|
1923
|
+
}
|
|
1924
|
+
function prepareResponsesTools({
|
|
1925
|
+
tools,
|
|
1926
|
+
toolChoice,
|
|
1927
|
+
strictJsonSchema
|
|
1928
|
+
}) {
|
|
1929
|
+
tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
|
|
1930
|
+
const toolWarnings = [];
|
|
1931
|
+
if (tools == null) {
|
|
1932
|
+
return { tools: void 0, toolChoice: void 0, toolWarnings };
|
|
1933
|
+
}
|
|
1934
|
+
const openaiTools2 = [];
|
|
1935
|
+
for (const tool of tools) {
|
|
1936
|
+
switch (tool.type) {
|
|
1937
|
+
case "function":
|
|
1938
|
+
openaiTools2.push({
|
|
1939
|
+
type: "function",
|
|
1940
|
+
name: tool.name,
|
|
1941
|
+
description: tool.description,
|
|
1942
|
+
parameters: tool.inputSchema,
|
|
1943
|
+
strict: strictJsonSchema
|
|
1944
|
+
});
|
|
1945
|
+
break;
|
|
1946
|
+
case "provider-defined": {
|
|
1947
|
+
switch (tool.id) {
|
|
1948
|
+
case "openai.file_search": {
|
|
1949
|
+
const args = fileSearchArgsSchema.parse(tool.args);
|
|
1950
|
+
openaiTools2.push({
|
|
1951
|
+
type: "file_search",
|
|
1952
|
+
vector_store_ids: args.vectorStoreIds,
|
|
1953
|
+
max_num_results: args.maxNumResults,
|
|
1954
|
+
ranking_options: args.ranking ? { ranker: args.ranking.ranker } : void 0,
|
|
1955
|
+
filters: args.filters
|
|
1956
|
+
});
|
|
1957
|
+
break;
|
|
1958
|
+
}
|
|
1959
|
+
case "openai.web_search_preview": {
|
|
1960
|
+
const args = webSearchPreviewArgsSchema.parse(tool.args);
|
|
1961
|
+
openaiTools2.push({
|
|
1962
|
+
type: "web_search_preview",
|
|
1963
|
+
search_context_size: args.searchContextSize,
|
|
1964
|
+
user_location: args.userLocation
|
|
1965
|
+
});
|
|
1966
|
+
break;
|
|
1967
|
+
}
|
|
1968
|
+
case "openai.code_interpreter": {
|
|
1969
|
+
const args = codeInterpreterArgsSchema.parse(tool.args);
|
|
1970
|
+
openaiTools2.push({
|
|
1971
|
+
type: "code_interpreter",
|
|
1972
|
+
container: args.container == null ? { type: "auto", file_ids: void 0 } : typeof args.container === "string" ? args.container : { type: "auto", file_ids: args.container.fileIds }
|
|
1973
|
+
});
|
|
1974
|
+
break;
|
|
1975
|
+
}
|
|
1976
|
+
default: {
|
|
1977
|
+
toolWarnings.push({ type: "unsupported-tool", tool });
|
|
1978
|
+
break;
|
|
1979
|
+
}
|
|
1980
|
+
}
|
|
1981
|
+
break;
|
|
1982
|
+
}
|
|
1983
|
+
default:
|
|
1984
|
+
toolWarnings.push({ type: "unsupported-tool", tool });
|
|
1985
|
+
break;
|
|
1986
|
+
}
|
|
1987
|
+
}
|
|
1988
|
+
if (toolChoice == null) {
|
|
1989
|
+
return { tools: openaiTools2, toolChoice: void 0, toolWarnings };
|
|
1990
|
+
}
|
|
1991
|
+
const type = toolChoice.type;
|
|
1992
|
+
switch (type) {
|
|
1993
|
+
case "auto":
|
|
1994
|
+
case "none":
|
|
1995
|
+
case "required":
|
|
1996
|
+
return { tools: openaiTools2, toolChoice: type, toolWarnings };
|
|
1997
|
+
case "tool":
|
|
1998
|
+
return {
|
|
1999
|
+
tools: openaiTools2,
|
|
2000
|
+
toolChoice: toolChoice.toolName === "code_interpreter" || toolChoice.toolName === "file_search" || toolChoice.toolName === "web_search_preview" ? { type: toolChoice.toolName } : { type: "function", name: toolChoice.toolName },
|
|
2001
|
+
toolWarnings
|
|
2002
|
+
};
|
|
2003
|
+
default: {
|
|
2004
|
+
const _exhaustiveCheck = type;
|
|
2005
|
+
throw new UnsupportedFunctionalityError({
|
|
2006
|
+
functionality: `tool choice type: ${_exhaustiveCheck}`
|
|
2007
|
+
});
|
|
2008
|
+
}
|
|
2009
|
+
}
|
|
2010
|
+
}
|
|
2011
|
+
var TOP_LOGPROBS_MAX = 20;
|
|
2012
|
+
var LOGPROBS_SCHEMA = z.array(
|
|
2013
|
+
z.object({
|
|
2014
|
+
token: z.string(),
|
|
2015
|
+
logprob: z.number(),
|
|
2016
|
+
top_logprobs: z.array(
|
|
2017
|
+
z.object({
|
|
2018
|
+
token: z.string(),
|
|
2019
|
+
logprob: z.number()
|
|
2020
|
+
})
|
|
2021
|
+
)
|
|
2022
|
+
})
|
|
2023
|
+
);
|
|
2024
|
+
var OpenAIResponsesLanguageModel = class {
|
|
2025
|
+
constructor(modelId, config) {
|
|
2026
|
+
this.specificationVersion = "v2";
|
|
2027
|
+
this.supportedUrls = {
|
|
2028
|
+
"image/*": [/^https?:\/\/.*$/]
|
|
2029
|
+
};
|
|
2030
|
+
this.modelId = modelId;
|
|
2031
|
+
this.config = config;
|
|
2032
|
+
}
|
|
2033
|
+
get provider() {
|
|
2034
|
+
return this.config.provider;
|
|
2035
|
+
}
|
|
2036
|
+
async getArgs({
|
|
2037
|
+
maxOutputTokens,
|
|
2038
|
+
temperature,
|
|
2039
|
+
stopSequences,
|
|
2040
|
+
topP,
|
|
2041
|
+
topK,
|
|
2042
|
+
presencePenalty,
|
|
2043
|
+
frequencyPenalty,
|
|
2044
|
+
seed,
|
|
2045
|
+
prompt,
|
|
2046
|
+
providerOptions,
|
|
2047
|
+
tools,
|
|
2048
|
+
toolChoice,
|
|
2049
|
+
responseFormat
|
|
2050
|
+
}) {
|
|
2051
|
+
var _a, _b;
|
|
2052
|
+
const warnings = [];
|
|
2053
|
+
const modelConfig = getResponsesModelConfig(this.modelId);
|
|
2054
|
+
if (topK != null) {
|
|
2055
|
+
warnings.push({ type: "unsupported-setting", setting: "topK" });
|
|
2056
|
+
}
|
|
2057
|
+
if (seed != null) {
|
|
2058
|
+
warnings.push({ type: "unsupported-setting", setting: "seed" });
|
|
2059
|
+
}
|
|
2060
|
+
if (presencePenalty != null) {
|
|
2061
|
+
warnings.push({
|
|
2062
|
+
type: "unsupported-setting",
|
|
2063
|
+
setting: "presencePenalty"
|
|
2064
|
+
});
|
|
2065
|
+
}
|
|
2066
|
+
if (frequencyPenalty != null) {
|
|
2067
|
+
warnings.push({
|
|
2068
|
+
type: "unsupported-setting",
|
|
2069
|
+
setting: "frequencyPenalty"
|
|
2070
|
+
});
|
|
2071
|
+
}
|
|
2072
|
+
if (stopSequences != null) {
|
|
2073
|
+
warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
|
|
2074
|
+
}
|
|
2075
|
+
const { messages, warnings: messageWarnings } = await convertToOpenAIResponsesMessages({
|
|
2076
|
+
prompt,
|
|
2077
|
+
systemMessageMode: modelConfig.systemMessageMode,
|
|
2078
|
+
fileIdPrefixes: this.config.fileIdPrefixes
|
|
2079
|
+
});
|
|
2080
|
+
warnings.push(...messageWarnings);
|
|
2081
|
+
const openaiOptions = await parseProviderOptions({
|
|
2082
|
+
provider: "openai",
|
|
2083
|
+
providerOptions,
|
|
2084
|
+
schema: openaiResponsesProviderOptionsSchema
|
|
2085
|
+
});
|
|
2086
|
+
const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
|
|
2087
|
+
const topLogprobs = typeof (openaiOptions == null ? void 0 : openaiOptions.logprobs) === "number" ? openaiOptions == null ? void 0 : openaiOptions.logprobs : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? TOP_LOGPROBS_MAX : void 0;
|
|
2088
|
+
const openaiOptionsInclude = topLogprobs ? Array.isArray(openaiOptions == null ? void 0 : openaiOptions.include) ? [...openaiOptions == null ? void 0 : openaiOptions.include, "message.output_text.logprobs"] : ["message.output_text.logprobs"] : openaiOptions == null ? void 0 : openaiOptions.include;
|
|
2089
|
+
const baseArgs = {
|
|
2090
|
+
model: this.modelId,
|
|
2091
|
+
input: messages,
|
|
2092
|
+
temperature,
|
|
2093
|
+
top_p: topP,
|
|
2094
|
+
max_output_tokens: maxOutputTokens,
|
|
2095
|
+
...((responseFormat == null ? void 0 : responseFormat.type) === "json" || (openaiOptions == null ? void 0 : openaiOptions.textVerbosity)) && {
|
|
2096
|
+
text: {
|
|
2097
|
+
...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
|
|
2098
|
+
format: responseFormat.schema != null ? {
|
|
2099
|
+
type: "json_schema",
|
|
2100
|
+
strict: strictJsonSchema,
|
|
2101
|
+
name: (_b = responseFormat.name) != null ? _b : "response",
|
|
2102
|
+
description: responseFormat.description,
|
|
2103
|
+
schema: responseFormat.schema
|
|
2104
|
+
} : { type: "json_object" }
|
|
2105
|
+
},
|
|
2106
|
+
...(openaiOptions == null ? void 0 : openaiOptions.textVerbosity) && {
|
|
2107
|
+
verbosity: openaiOptions.textVerbosity
|
|
2108
|
+
}
|
|
2109
|
+
}
|
|
2110
|
+
},
|
|
2111
|
+
// provider options:
|
|
2112
|
+
metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
|
|
2113
|
+
parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
|
|
2114
|
+
previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
|
|
2115
|
+
store: openaiOptions == null ? void 0 : openaiOptions.store,
|
|
2116
|
+
user: openaiOptions == null ? void 0 : openaiOptions.user,
|
|
2117
|
+
instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
|
|
2118
|
+
service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
|
|
2119
|
+
include: openaiOptionsInclude,
|
|
2120
|
+
prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
|
|
2121
|
+
safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
|
|
2122
|
+
top_logprobs: topLogprobs,
|
|
2123
|
+
// model-specific settings:
|
|
2124
|
+
...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
|
|
2125
|
+
reasoning: {
|
|
2126
|
+
...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
|
|
2127
|
+
effort: openaiOptions.reasoningEffort
|
|
2128
|
+
},
|
|
2129
|
+
...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
|
|
2130
|
+
summary: openaiOptions.reasoningSummary
|
|
2131
|
+
}
|
|
2132
|
+
}
|
|
2133
|
+
},
|
|
2134
|
+
...modelConfig.requiredAutoTruncation && {
|
|
2135
|
+
truncation: "auto"
|
|
2136
|
+
}
|
|
2137
|
+
};
|
|
2138
|
+
if (modelConfig.isReasoningModel) {
|
|
2139
|
+
if (baseArgs.temperature != null) {
|
|
2140
|
+
baseArgs.temperature = void 0;
|
|
2141
|
+
warnings.push({
|
|
2142
|
+
type: "unsupported-setting",
|
|
2143
|
+
setting: "temperature",
|
|
2144
|
+
details: "temperature is not supported for reasoning models"
|
|
2145
|
+
});
|
|
2146
|
+
}
|
|
2147
|
+
if (baseArgs.top_p != null) {
|
|
2148
|
+
baseArgs.top_p = void 0;
|
|
2149
|
+
warnings.push({
|
|
2150
|
+
type: "unsupported-setting",
|
|
2151
|
+
setting: "topP",
|
|
2152
|
+
details: "topP is not supported for reasoning models"
|
|
2153
|
+
});
|
|
2154
|
+
}
|
|
2155
|
+
} else {
|
|
2156
|
+
if ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null) {
|
|
2157
|
+
warnings.push({
|
|
2158
|
+
type: "unsupported-setting",
|
|
2159
|
+
setting: "reasoningEffort",
|
|
2160
|
+
details: "reasoningEffort is not supported for non-reasoning models"
|
|
2161
|
+
});
|
|
2162
|
+
}
|
|
2163
|
+
if ((openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) {
|
|
2164
|
+
warnings.push({
|
|
2165
|
+
type: "unsupported-setting",
|
|
2166
|
+
setting: "reasoningSummary",
|
|
2167
|
+
details: "reasoningSummary is not supported for non-reasoning models"
|
|
2168
|
+
});
|
|
2169
|
+
}
|
|
2170
|
+
}
|
|
2171
|
+
if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !modelConfig.supportsFlexProcessing) {
|
|
2172
|
+
warnings.push({
|
|
2173
|
+
type: "unsupported-setting",
|
|
2174
|
+
setting: "serviceTier",
|
|
2175
|
+
details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
|
|
2176
|
+
});
|
|
2177
|
+
delete baseArgs.service_tier;
|
|
2178
|
+
}
|
|
2179
|
+
if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !modelConfig.supportsPriorityProcessing) {
|
|
2180
|
+
warnings.push({
|
|
2181
|
+
type: "unsupported-setting",
|
|
2182
|
+
setting: "serviceTier",
|
|
2183
|
+
details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
|
|
2184
|
+
});
|
|
2185
|
+
delete baseArgs.service_tier;
|
|
2186
|
+
}
|
|
2187
|
+
const {
|
|
2188
|
+
tools: openaiTools2,
|
|
2189
|
+
toolChoice: openaiToolChoice,
|
|
2190
|
+
toolWarnings
|
|
2191
|
+
} = prepareResponsesTools({
|
|
2192
|
+
tools,
|
|
2193
|
+
toolChoice,
|
|
2194
|
+
strictJsonSchema
|
|
2195
|
+
});
|
|
2196
|
+
return {
|
|
2197
|
+
args: {
|
|
2198
|
+
...baseArgs,
|
|
2199
|
+
tools: openaiTools2,
|
|
2200
|
+
tool_choice: openaiToolChoice
|
|
2201
|
+
},
|
|
2202
|
+
warnings: [...warnings, ...toolWarnings]
|
|
2203
|
+
};
|
|
2204
|
+
}
|
|
2205
|
+
async doGenerate(options) {
|
|
2206
|
+
var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q;
|
|
2207
|
+
const { args: body, warnings } = await this.getArgs(options);
|
|
2208
|
+
const url = this.config.url({
|
|
2209
|
+
path: "/responses",
|
|
2210
|
+
modelId: this.modelId
|
|
2211
|
+
});
|
|
2212
|
+
const {
|
|
2213
|
+
responseHeaders,
|
|
2214
|
+
value: response,
|
|
2215
|
+
rawValue: rawResponse
|
|
2216
|
+
} = await postJsonToApi({
|
|
2217
|
+
url,
|
|
2218
|
+
headers: combineHeaders(this.config.headers(), options.headers),
|
|
2219
|
+
body,
|
|
2220
|
+
failedResponseHandler: openaiFailedResponseHandler,
|
|
2221
|
+
successfulResponseHandler: createJsonResponseHandler(
|
|
2222
|
+
z.object({
|
|
2223
|
+
id: z.string(),
|
|
2224
|
+
created_at: z.number(),
|
|
2225
|
+
error: z.object({
|
|
2226
|
+
code: z.string(),
|
|
2227
|
+
message: z.string()
|
|
2228
|
+
}).nullish(),
|
|
2229
|
+
model: z.string(),
|
|
2230
|
+
output: z.array(
|
|
2231
|
+
z.discriminatedUnion("type", [
|
|
2232
|
+
z.object({
|
|
2233
|
+
type: z.literal("message"),
|
|
2234
|
+
role: z.literal("assistant"),
|
|
2235
|
+
id: z.string(),
|
|
2236
|
+
content: z.array(
|
|
2237
|
+
z.object({
|
|
2238
|
+
type: z.literal("output_text"),
|
|
2239
|
+
text: z.string(),
|
|
2240
|
+
logprobs: LOGPROBS_SCHEMA.nullish(),
|
|
2241
|
+
annotations: z.array(
|
|
2242
|
+
z.discriminatedUnion("type", [
|
|
2243
|
+
z.object({
|
|
2244
|
+
type: z.literal("url_citation"),
|
|
2245
|
+
start_index: z.number(),
|
|
2246
|
+
end_index: z.number(),
|
|
2247
|
+
url: z.string(),
|
|
2248
|
+
title: z.string()
|
|
2249
|
+
}),
|
|
2250
|
+
z.object({
|
|
2251
|
+
type: z.literal("file_citation"),
|
|
2252
|
+
start_index: z.number(),
|
|
2253
|
+
end_index: z.number(),
|
|
2254
|
+
file_id: z.string(),
|
|
2255
|
+
quote: z.string()
|
|
2256
|
+
})
|
|
2257
|
+
])
|
|
2258
|
+
)
|
|
2259
|
+
})
|
|
2260
|
+
)
|
|
2261
|
+
}),
|
|
2262
|
+
z.object({
|
|
2263
|
+
type: z.literal("function_call"),
|
|
2264
|
+
call_id: z.string(),
|
|
2265
|
+
name: z.string(),
|
|
2266
|
+
arguments: z.string(),
|
|
2267
|
+
id: z.string()
|
|
2268
|
+
}),
|
|
2269
|
+
z.object({
|
|
2270
|
+
type: z.literal("web_search_call"),
|
|
2271
|
+
id: z.string(),
|
|
2272
|
+
status: z.string().optional(),
|
|
2273
|
+
action: z.object({
|
|
2274
|
+
type: z.literal("search"),
|
|
2275
|
+
query: z.string().optional()
|
|
2276
|
+
}).nullish()
|
|
2277
|
+
}),
|
|
2278
|
+
z.object({
|
|
2279
|
+
type: z.literal("computer_call"),
|
|
2280
|
+
id: z.string(),
|
|
2281
|
+
status: z.string().optional()
|
|
2282
|
+
}),
|
|
2283
|
+
z.object({
|
|
2284
|
+
type: z.literal("file_search_call"),
|
|
2285
|
+
id: z.string(),
|
|
2286
|
+
status: z.string().optional(),
|
|
2287
|
+
queries: z.array(z.string()).nullish(),
|
|
2288
|
+
results: z.array(
|
|
2289
|
+
z.object({
|
|
2290
|
+
attributes: z.object({
|
|
2291
|
+
file_id: z.string(),
|
|
2292
|
+
filename: z.string(),
|
|
2293
|
+
score: z.number(),
|
|
2294
|
+
text: z.string()
|
|
2295
|
+
})
|
|
2296
|
+
})
|
|
2297
|
+
).nullish()
|
|
2298
|
+
}),
|
|
2299
|
+
z.object({
|
|
2300
|
+
type: z.literal("reasoning"),
|
|
2301
|
+
id: z.string(),
|
|
2302
|
+
encrypted_content: z.string().nullish(),
|
|
2303
|
+
summary: z.array(
|
|
2304
|
+
z.object({
|
|
2305
|
+
type: z.literal("summary_text"),
|
|
2306
|
+
text: z.string()
|
|
2307
|
+
})
|
|
2308
|
+
)
|
|
2309
|
+
})
|
|
2310
|
+
])
|
|
2311
|
+
),
|
|
2312
|
+
incomplete_details: z.object({ reason: z.string() }).nullable(),
|
|
2313
|
+
usage: usageSchema2
|
|
2314
|
+
})
|
|
2315
|
+
),
|
|
2316
|
+
abortSignal: options.abortSignal,
|
|
2317
|
+
fetch: this.config.fetch
|
|
2318
|
+
});
|
|
2319
|
+
if (response.error) {
|
|
2320
|
+
throw new APICallError({
|
|
2321
|
+
message: response.error.message,
|
|
2322
|
+
url,
|
|
2323
|
+
requestBodyValues: body,
|
|
2324
|
+
statusCode: 400,
|
|
2325
|
+
responseHeaders,
|
|
2326
|
+
responseBody: rawResponse,
|
|
2327
|
+
isRetryable: false
|
|
2328
|
+
});
|
|
2329
|
+
}
|
|
2330
|
+
const content = [];
|
|
2331
|
+
const logprobs = [];
|
|
2332
|
+
for (const part of response.output) {
|
|
2333
|
+
switch (part.type) {
|
|
2334
|
+
case "reasoning": {
|
|
2335
|
+
if (part.summary.length === 0) {
|
|
2336
|
+
part.summary.push({ type: "summary_text", text: "" });
|
|
2337
|
+
}
|
|
2338
|
+
for (const summary of part.summary) {
|
|
2339
|
+
content.push({
|
|
2340
|
+
type: "reasoning",
|
|
2341
|
+
text: summary.text,
|
|
2342
|
+
providerMetadata: {
|
|
2343
|
+
openai: {
|
|
2344
|
+
itemId: part.id,
|
|
2345
|
+
reasoningEncryptedContent: (_a = part.encrypted_content) != null ? _a : null
|
|
2346
|
+
}
|
|
2347
|
+
}
|
|
2348
|
+
});
|
|
2349
|
+
}
|
|
2350
|
+
break;
|
|
2351
|
+
}
|
|
2352
|
+
case "message": {
|
|
2353
|
+
for (const contentPart of part.content) {
|
|
2354
|
+
if (((_c = (_b = options.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.logprobs) && contentPart.logprobs) {
|
|
2355
|
+
logprobs.push(contentPart.logprobs);
|
|
2356
|
+
}
|
|
2357
|
+
content.push({
|
|
2358
|
+
type: "text",
|
|
2359
|
+
text: contentPart.text,
|
|
2360
|
+
providerMetadata: {
|
|
2361
|
+
openai: {
|
|
2362
|
+
itemId: part.id
|
|
2363
|
+
}
|
|
2364
|
+
}
|
|
2365
|
+
});
|
|
2366
|
+
for (const annotation of contentPart.annotations) {
|
|
2367
|
+
if (annotation.type === "url_citation") {
|
|
2368
|
+
content.push({
|
|
2369
|
+
type: "source",
|
|
2370
|
+
sourceType: "url",
|
|
2371
|
+
id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : generateId(),
|
|
2372
|
+
url: annotation.url,
|
|
2373
|
+
title: annotation.title
|
|
2374
|
+
});
|
|
2375
|
+
} else if (annotation.type === "file_citation") {
|
|
2376
|
+
content.push({
|
|
2377
|
+
type: "source",
|
|
2378
|
+
sourceType: "document",
|
|
2379
|
+
id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : generateId(),
|
|
2380
|
+
mediaType: "text/plain",
|
|
2381
|
+
title: annotation.quote,
|
|
2382
|
+
filename: annotation.file_id
|
|
2383
|
+
});
|
|
2384
|
+
}
|
|
2385
|
+
}
|
|
2386
|
+
}
|
|
2387
|
+
break;
|
|
2388
|
+
}
|
|
2389
|
+
case "function_call": {
|
|
2390
|
+
content.push({
|
|
2391
|
+
type: "tool-call",
|
|
2392
|
+
toolCallId: part.call_id,
|
|
2393
|
+
toolName: part.name,
|
|
2394
|
+
input: part.arguments,
|
|
2395
|
+
providerMetadata: {
|
|
2396
|
+
openai: {
|
|
2397
|
+
itemId: part.id
|
|
2398
|
+
}
|
|
2399
|
+
}
|
|
2400
|
+
});
|
|
2401
|
+
break;
|
|
2402
|
+
}
|
|
2403
|
+
case "web_search_call": {
|
|
2404
|
+
content.push({
|
|
2405
|
+
type: "tool-call",
|
|
2406
|
+
toolCallId: part.id,
|
|
2407
|
+
toolName: "web_search_preview",
|
|
2408
|
+
input: (_k = (_j = part.action) == null ? void 0 : _j.query) != null ? _k : "",
|
|
2409
|
+
providerExecuted: true
|
|
2410
|
+
});
|
|
2411
|
+
content.push({
|
|
2412
|
+
type: "tool-result",
|
|
2413
|
+
toolCallId: part.id,
|
|
2414
|
+
toolName: "web_search_preview",
|
|
2415
|
+
result: {
|
|
2416
|
+
status: part.status || "completed",
|
|
2417
|
+
...((_l = part.action) == null ? void 0 : _l.query) && { query: part.action.query }
|
|
2418
|
+
},
|
|
2419
|
+
providerExecuted: true
|
|
2420
|
+
});
|
|
2421
|
+
break;
|
|
2422
|
+
}
|
|
2423
|
+
case "computer_call": {
|
|
2424
|
+
content.push({
|
|
2425
|
+
type: "tool-call",
|
|
2426
|
+
toolCallId: part.id,
|
|
2427
|
+
toolName: "computer_use",
|
|
2428
|
+
input: "",
|
|
2429
|
+
providerExecuted: true
|
|
2430
|
+
});
|
|
2431
|
+
content.push({
|
|
2432
|
+
type: "tool-result",
|
|
2433
|
+
toolCallId: part.id,
|
|
2434
|
+
toolName: "computer_use",
|
|
2435
|
+
result: {
|
|
2436
|
+
type: "computer_use_tool_result",
|
|
2437
|
+
status: part.status || "completed"
|
|
2438
|
+
},
|
|
2439
|
+
providerExecuted: true
|
|
2440
|
+
});
|
|
2441
|
+
break;
|
|
2442
|
+
}
|
|
2443
|
+
case "file_search_call": {
|
|
2444
|
+
content.push({
|
|
2445
|
+
type: "tool-call",
|
|
2446
|
+
toolCallId: part.id,
|
|
2447
|
+
toolName: "file_search",
|
|
2448
|
+
input: "",
|
|
2449
|
+
providerExecuted: true
|
|
2450
|
+
});
|
|
2451
|
+
content.push({
|
|
2452
|
+
type: "tool-result",
|
|
2453
|
+
toolCallId: part.id,
|
|
2454
|
+
toolName: "file_search",
|
|
2455
|
+
result: {
|
|
2456
|
+
type: "file_search_tool_result",
|
|
2457
|
+
status: part.status || "completed",
|
|
2458
|
+
...part.queries && { queries: part.queries },
|
|
2459
|
+
...part.results && { results: part.results }
|
|
2460
|
+
},
|
|
2461
|
+
providerExecuted: true
|
|
2462
|
+
});
|
|
2463
|
+
break;
|
|
2464
|
+
}
|
|
2465
|
+
}
|
|
2466
|
+
}
|
|
2467
|
+
const providerMetadata = {
|
|
2468
|
+
openai: { responseId: response.id }
|
|
2469
|
+
};
|
|
2470
|
+
if (logprobs.length > 0) {
|
|
2471
|
+
providerMetadata.openai.logprobs = logprobs;
|
|
2472
|
+
}
|
|
2473
|
+
return {
|
|
2474
|
+
content,
|
|
2475
|
+
finishReason: mapOpenAIResponseFinishReason({
|
|
2476
|
+
finishReason: (_m = response.incomplete_details) == null ? void 0 : _m.reason,
|
|
2477
|
+
hasToolCalls: content.some((part) => part.type === "tool-call")
|
|
2478
|
+
}),
|
|
2479
|
+
usage: {
|
|
2480
|
+
inputTokens: response.usage.input_tokens,
|
|
2481
|
+
outputTokens: response.usage.output_tokens,
|
|
2482
|
+
totalTokens: response.usage.input_tokens + response.usage.output_tokens,
|
|
2483
|
+
reasoningTokens: (_o = (_n = response.usage.output_tokens_details) == null ? void 0 : _n.reasoning_tokens) != null ? _o : void 0,
|
|
2484
|
+
cachedInputTokens: (_q = (_p = response.usage.input_tokens_details) == null ? void 0 : _p.cached_tokens) != null ? _q : void 0
|
|
2485
|
+
},
|
|
2486
|
+
request: { body },
|
|
2487
|
+
response: {
|
|
2488
|
+
id: response.id,
|
|
2489
|
+
timestamp: new Date(response.created_at * 1e3),
|
|
2490
|
+
modelId: response.model,
|
|
2491
|
+
headers: responseHeaders,
|
|
2492
|
+
body: rawResponse
|
|
2493
|
+
},
|
|
2494
|
+
providerMetadata,
|
|
2495
|
+
warnings
|
|
2496
|
+
};
|
|
2497
|
+
}
|
|
2498
|
+
async doStream(options) {
|
|
2499
|
+
const { args: body, warnings } = await this.getArgs(options);
|
|
2500
|
+
const { responseHeaders, value: response } = await postJsonToApi({
|
|
2501
|
+
url: this.config.url({
|
|
2502
|
+
path: "/responses",
|
|
2503
|
+
modelId: this.modelId
|
|
2504
|
+
}),
|
|
2505
|
+
headers: combineHeaders(this.config.headers(), options.headers),
|
|
2506
|
+
body: {
|
|
2507
|
+
...body,
|
|
2508
|
+
stream: true
|
|
2509
|
+
},
|
|
2510
|
+
failedResponseHandler: openaiFailedResponseHandler,
|
|
2511
|
+
successfulResponseHandler: createEventSourceResponseHandler(
|
|
2512
|
+
openaiResponsesChunkSchema
|
|
2513
|
+
),
|
|
2514
|
+
abortSignal: options.abortSignal,
|
|
2515
|
+
fetch: this.config.fetch
|
|
2516
|
+
});
|
|
2517
|
+
const self = this;
|
|
2518
|
+
let finishReason = "unknown";
|
|
2519
|
+
const usage = {
|
|
2520
|
+
inputTokens: void 0,
|
|
2521
|
+
outputTokens: void 0,
|
|
2522
|
+
totalTokens: void 0
|
|
2523
|
+
};
|
|
2524
|
+
const logprobs = [];
|
|
2525
|
+
let responseId = null;
|
|
2526
|
+
const ongoingToolCalls = {};
|
|
2527
|
+
let hasToolCalls = false;
|
|
2528
|
+
const activeReasoning = {};
|
|
2529
|
+
return {
|
|
2530
|
+
stream: response.pipeThrough(
|
|
2531
|
+
new TransformStream({
|
|
2532
|
+
start(controller) {
|
|
2533
|
+
controller.enqueue({ type: "stream-start", warnings });
|
|
2534
|
+
},
|
|
2535
|
+
transform(chunk, controller) {
|
|
2536
|
+
var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s;
|
|
2537
|
+
if (options.includeRawChunks) {
|
|
2538
|
+
controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
|
|
2539
|
+
}
|
|
2540
|
+
if (!chunk.success) {
|
|
2541
|
+
finishReason = "error";
|
|
2542
|
+
controller.enqueue({ type: "error", error: chunk.error });
|
|
2543
|
+
return;
|
|
2544
|
+
}
|
|
2545
|
+
const value = chunk.value;
|
|
2546
|
+
if (isResponseOutputItemAddedChunk(value)) {
|
|
2547
|
+
if (value.item.type === "function_call") {
|
|
2548
|
+
ongoingToolCalls[value.output_index] = {
|
|
2549
|
+
toolName: value.item.name,
|
|
2550
|
+
toolCallId: value.item.call_id
|
|
2551
|
+
};
|
|
2552
|
+
controller.enqueue({
|
|
2553
|
+
type: "tool-input-start",
|
|
2554
|
+
id: value.item.call_id,
|
|
2555
|
+
toolName: value.item.name
|
|
2556
|
+
});
|
|
2557
|
+
} else if (value.item.type === "web_search_call") {
|
|
2558
|
+
ongoingToolCalls[value.output_index] = {
|
|
2559
|
+
toolName: "web_search_preview",
|
|
2560
|
+
toolCallId: value.item.id
|
|
2561
|
+
};
|
|
2562
|
+
controller.enqueue({
|
|
2563
|
+
type: "tool-input-start",
|
|
2564
|
+
id: value.item.id,
|
|
2565
|
+
toolName: "web_search_preview"
|
|
2566
|
+
});
|
|
2567
|
+
} else if (value.item.type === "computer_call") {
|
|
2568
|
+
ongoingToolCalls[value.output_index] = {
|
|
2569
|
+
toolName: "computer_use",
|
|
2570
|
+
toolCallId: value.item.id
|
|
2571
|
+
};
|
|
2572
|
+
controller.enqueue({
|
|
2573
|
+
type: "tool-input-start",
|
|
2574
|
+
id: value.item.id,
|
|
2575
|
+
toolName: "computer_use"
|
|
2576
|
+
});
|
|
2577
|
+
} else if (value.item.type === "file_search_call") {
|
|
2578
|
+
ongoingToolCalls[value.output_index] = {
|
|
2579
|
+
toolName: "file_search",
|
|
2580
|
+
toolCallId: value.item.id
|
|
2581
|
+
};
|
|
2582
|
+
controller.enqueue({
|
|
2583
|
+
type: "tool-input-start",
|
|
2584
|
+
id: value.item.id,
|
|
2585
|
+
toolName: "file_search"
|
|
2586
|
+
});
|
|
2587
|
+
} else if (value.item.type === "message") {
|
|
2588
|
+
controller.enqueue({
|
|
2589
|
+
type: "text-start",
|
|
2590
|
+
id: value.item.id,
|
|
2591
|
+
providerMetadata: {
|
|
2592
|
+
openai: {
|
|
2593
|
+
itemId: value.item.id
|
|
2594
|
+
}
|
|
2595
|
+
}
|
|
2596
|
+
});
|
|
2597
|
+
} else if (isResponseOutputItemAddedReasoningChunk(value)) {
|
|
2598
|
+
activeReasoning[value.item.id] = {
|
|
2599
|
+
encryptedContent: value.item.encrypted_content,
|
|
2600
|
+
summaryParts: [0]
|
|
2601
|
+
};
|
|
2602
|
+
controller.enqueue({
|
|
2603
|
+
type: "reasoning-start",
|
|
2604
|
+
id: `${value.item.id}:0`,
|
|
2605
|
+
providerMetadata: {
|
|
2606
|
+
openai: {
|
|
2607
|
+
itemId: value.item.id,
|
|
2608
|
+
reasoningEncryptedContent: (_a = value.item.encrypted_content) != null ? _a : null
|
|
2609
|
+
}
|
|
2610
|
+
}
|
|
2611
|
+
});
|
|
2612
|
+
}
|
|
2613
|
+
} else if (isResponseOutputItemDoneChunk(value)) {
|
|
2614
|
+
if (value.item.type === "function_call") {
|
|
2615
|
+
ongoingToolCalls[value.output_index] = void 0;
|
|
2616
|
+
hasToolCalls = true;
|
|
2617
|
+
controller.enqueue({
|
|
2618
|
+
type: "tool-input-end",
|
|
2619
|
+
id: value.item.call_id
|
|
2620
|
+
});
|
|
2621
|
+
controller.enqueue({
|
|
2622
|
+
type: "tool-call",
|
|
2623
|
+
toolCallId: value.item.call_id,
|
|
2624
|
+
toolName: value.item.name,
|
|
2625
|
+
input: value.item.arguments,
|
|
2626
|
+
providerMetadata: {
|
|
2627
|
+
openai: {
|
|
2628
|
+
itemId: value.item.id
|
|
2629
|
+
}
|
|
2630
|
+
}
|
|
2631
|
+
});
|
|
2632
|
+
} else if (value.item.type === "web_search_call") {
|
|
2633
|
+
ongoingToolCalls[value.output_index] = void 0;
|
|
2634
|
+
hasToolCalls = true;
|
|
2635
|
+
controller.enqueue({
|
|
2636
|
+
type: "tool-input-end",
|
|
2637
|
+
id: value.item.id
|
|
2638
|
+
});
|
|
2639
|
+
controller.enqueue({
|
|
2640
|
+
type: "tool-call",
|
|
2641
|
+
toolCallId: value.item.id,
|
|
2642
|
+
toolName: "web_search_preview",
|
|
2643
|
+
input: (_c = (_b = value.item.action) == null ? void 0 : _b.query) != null ? _c : "",
|
|
2644
|
+
providerExecuted: true
|
|
2645
|
+
});
|
|
2646
|
+
controller.enqueue({
|
|
2647
|
+
type: "tool-result",
|
|
2648
|
+
toolCallId: value.item.id,
|
|
2649
|
+
toolName: "web_search_preview",
|
|
2650
|
+
result: {
|
|
2651
|
+
type: "web_search_tool_result",
|
|
2652
|
+
status: value.item.status || "completed",
|
|
2653
|
+
...((_d = value.item.action) == null ? void 0 : _d.query) && {
|
|
2654
|
+
query: value.item.action.query
|
|
2655
|
+
}
|
|
2656
|
+
},
|
|
2657
|
+
providerExecuted: true
|
|
2658
|
+
});
|
|
2659
|
+
} else if (value.item.type === "computer_call") {
|
|
2660
|
+
ongoingToolCalls[value.output_index] = void 0;
|
|
2661
|
+
hasToolCalls = true;
|
|
2662
|
+
controller.enqueue({
|
|
2663
|
+
type: "tool-input-end",
|
|
2664
|
+
id: value.item.id
|
|
2665
|
+
});
|
|
2666
|
+
controller.enqueue({
|
|
2667
|
+
type: "tool-call",
|
|
2668
|
+
toolCallId: value.item.id,
|
|
2669
|
+
toolName: "computer_use",
|
|
2670
|
+
input: "",
|
|
2671
|
+
providerExecuted: true
|
|
2672
|
+
});
|
|
2673
|
+
controller.enqueue({
|
|
2674
|
+
type: "tool-result",
|
|
2675
|
+
toolCallId: value.item.id,
|
|
2676
|
+
toolName: "computer_use",
|
|
2677
|
+
result: {
|
|
2678
|
+
type: "computer_use_tool_result",
|
|
2679
|
+
status: value.item.status || "completed"
|
|
2680
|
+
},
|
|
2681
|
+
providerExecuted: true
|
|
2682
|
+
});
|
|
2683
|
+
} else if (value.item.type === "file_search_call") {
|
|
2684
|
+
ongoingToolCalls[value.output_index] = void 0;
|
|
2685
|
+
hasToolCalls = true;
|
|
2686
|
+
controller.enqueue({
|
|
2687
|
+
type: "tool-input-end",
|
|
2688
|
+
id: value.item.id
|
|
2689
|
+
});
|
|
2690
|
+
controller.enqueue({
|
|
2691
|
+
type: "tool-call",
|
|
2692
|
+
toolCallId: value.item.id,
|
|
2693
|
+
toolName: "file_search",
|
|
2694
|
+
input: "",
|
|
2695
|
+
providerExecuted: true
|
|
2696
|
+
});
|
|
2697
|
+
controller.enqueue({
|
|
2698
|
+
type: "tool-result",
|
|
2699
|
+
toolCallId: value.item.id,
|
|
2700
|
+
toolName: "file_search",
|
|
2701
|
+
result: {
|
|
2702
|
+
type: "file_search_tool_result",
|
|
2703
|
+
status: value.item.status || "completed",
|
|
2704
|
+
...value.item.queries && { queries: value.item.queries },
|
|
2705
|
+
...value.item.results && { results: value.item.results }
|
|
2706
|
+
},
|
|
2707
|
+
providerExecuted: true
|
|
2708
|
+
});
|
|
2709
|
+
} else if (value.item.type === "message") {
|
|
2710
|
+
controller.enqueue({
|
|
2711
|
+
type: "text-end",
|
|
2712
|
+
id: value.item.id
|
|
2713
|
+
});
|
|
2714
|
+
} else if (isResponseOutputItemDoneReasoningChunk(value)) {
|
|
2715
|
+
const activeReasoningPart = activeReasoning[value.item.id];
|
|
2716
|
+
for (const summaryIndex of activeReasoningPart.summaryParts) {
|
|
2717
|
+
controller.enqueue({
|
|
2718
|
+
type: "reasoning-end",
|
|
2719
|
+
id: `${value.item.id}:${summaryIndex}`,
|
|
2720
|
+
providerMetadata: {
|
|
2721
|
+
openai: {
|
|
2722
|
+
itemId: value.item.id,
|
|
2723
|
+
reasoningEncryptedContent: (_e = value.item.encrypted_content) != null ? _e : null
|
|
2724
|
+
}
|
|
2725
|
+
}
|
|
2726
|
+
});
|
|
2727
|
+
}
|
|
2728
|
+
delete activeReasoning[value.item.id];
|
|
2729
|
+
}
|
|
2730
|
+
} else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
|
|
2731
|
+
const toolCall = ongoingToolCalls[value.output_index];
|
|
2732
|
+
if (toolCall != null) {
|
|
2733
|
+
controller.enqueue({
|
|
2734
|
+
type: "tool-input-delta",
|
|
2735
|
+
id: toolCall.toolCallId,
|
|
2736
|
+
delta: value.delta
|
|
2737
|
+
});
|
|
2738
|
+
}
|
|
2739
|
+
} else if (isResponseCreatedChunk(value)) {
|
|
2740
|
+
responseId = value.response.id;
|
|
2741
|
+
controller.enqueue({
|
|
2742
|
+
type: "response-metadata",
|
|
2743
|
+
id: value.response.id,
|
|
2744
|
+
timestamp: new Date(value.response.created_at * 1e3),
|
|
2745
|
+
modelId: value.response.model
|
|
2746
|
+
});
|
|
2747
|
+
} else if (isTextDeltaChunk(value)) {
|
|
2748
|
+
controller.enqueue({
|
|
2749
|
+
type: "text-delta",
|
|
2750
|
+
id: value.item_id,
|
|
2751
|
+
delta: value.delta
|
|
2752
|
+
});
|
|
2753
|
+
if (value.logprobs) {
|
|
2754
|
+
logprobs.push(value.logprobs);
|
|
2755
|
+
}
|
|
2756
|
+
} else if (isResponseReasoningSummaryPartAddedChunk(value)) {
|
|
2757
|
+
if (value.summary_index > 0) {
|
|
2758
|
+
(_f = activeReasoning[value.item_id]) == null ? void 0 : _f.summaryParts.push(
|
|
2759
|
+
value.summary_index
|
|
2760
|
+
);
|
|
2761
|
+
controller.enqueue({
|
|
2762
|
+
type: "reasoning-start",
|
|
2763
|
+
id: `${value.item_id}:${value.summary_index}`,
|
|
2764
|
+
providerMetadata: {
|
|
2765
|
+
openai: {
|
|
2766
|
+
itemId: value.item_id,
|
|
2767
|
+
reasoningEncryptedContent: (_h = (_g = activeReasoning[value.item_id]) == null ? void 0 : _g.encryptedContent) != null ? _h : null
|
|
2768
|
+
}
|
|
2769
|
+
}
|
|
2770
|
+
});
|
|
2771
|
+
}
|
|
2772
|
+
} else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
|
|
2773
|
+
controller.enqueue({
|
|
2774
|
+
type: "reasoning-delta",
|
|
2775
|
+
id: `${value.item_id}:${value.summary_index}`,
|
|
2776
|
+
delta: value.delta,
|
|
2777
|
+
providerMetadata: {
|
|
2778
|
+
openai: {
|
|
2779
|
+
itemId: value.item_id
|
|
2780
|
+
}
|
|
2781
|
+
}
|
|
2782
|
+
});
|
|
2783
|
+
} else if (isResponseFinishedChunk(value)) {
|
|
2784
|
+
finishReason = mapOpenAIResponseFinishReason({
|
|
2785
|
+
finishReason: (_i = value.response.incomplete_details) == null ? void 0 : _i.reason,
|
|
2786
|
+
hasToolCalls
|
|
2787
|
+
});
|
|
2788
|
+
usage.inputTokens = value.response.usage.input_tokens;
|
|
2789
|
+
usage.outputTokens = value.response.usage.output_tokens;
|
|
2790
|
+
usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
|
|
2791
|
+
usage.reasoningTokens = (_k = (_j = value.response.usage.output_tokens_details) == null ? void 0 : _j.reasoning_tokens) != null ? _k : void 0;
|
|
2792
|
+
usage.cachedInputTokens = (_m = (_l = value.response.usage.input_tokens_details) == null ? void 0 : _l.cached_tokens) != null ? _m : void 0;
|
|
2793
|
+
} else if (isResponseAnnotationAddedChunk(value)) {
|
|
2794
|
+
if (value.annotation.type === "url_citation") {
|
|
2795
|
+
controller.enqueue({
|
|
2796
|
+
type: "source",
|
|
2797
|
+
sourceType: "url",
|
|
2798
|
+
id: (_p = (_o = (_n = self.config).generateId) == null ? void 0 : _o.call(_n)) != null ? _p : generateId(),
|
|
2799
|
+
url: value.annotation.url,
|
|
2800
|
+
title: value.annotation.title
|
|
2801
|
+
});
|
|
2802
|
+
} else if (value.annotation.type === "file_citation") {
|
|
2803
|
+
controller.enqueue({
|
|
2804
|
+
type: "source",
|
|
2805
|
+
sourceType: "document",
|
|
2806
|
+
id: (_s = (_r = (_q = self.config).generateId) == null ? void 0 : _r.call(_q)) != null ? _s : generateId(),
|
|
2807
|
+
mediaType: "text/plain",
|
|
2808
|
+
title: value.annotation.quote,
|
|
2809
|
+
filename: value.annotation.file_id
|
|
2810
|
+
});
|
|
2811
|
+
}
|
|
2812
|
+
} else if (isErrorChunk(value)) {
|
|
2813
|
+
controller.enqueue({ type: "error", error: value });
|
|
2814
|
+
}
|
|
2815
|
+
},
|
|
2816
|
+
flush(controller) {
|
|
2817
|
+
const providerMetadata = {
|
|
2818
|
+
openai: {
|
|
2819
|
+
responseId
|
|
2820
|
+
}
|
|
2821
|
+
};
|
|
2822
|
+
if (logprobs.length > 0) {
|
|
2823
|
+
providerMetadata.openai.logprobs = logprobs;
|
|
2824
|
+
}
|
|
2825
|
+
controller.enqueue({
|
|
2826
|
+
type: "finish",
|
|
2827
|
+
finishReason,
|
|
2828
|
+
usage,
|
|
2829
|
+
providerMetadata
|
|
2830
|
+
});
|
|
2831
|
+
}
|
|
2832
|
+
})
|
|
2833
|
+
),
|
|
2834
|
+
request: { body },
|
|
2835
|
+
response: { headers: responseHeaders }
|
|
2836
|
+
};
|
|
2837
|
+
}
|
|
2838
|
+
};
|
|
2839
|
+
var usageSchema2 = z.object({
|
|
2840
|
+
input_tokens: z.number(),
|
|
2841
|
+
input_tokens_details: z.object({ cached_tokens: z.number().nullish() }).nullish(),
|
|
2842
|
+
output_tokens: z.number(),
|
|
2843
|
+
output_tokens_details: z.object({ reasoning_tokens: z.number().nullish() }).nullish()
|
|
2844
|
+
});
|
|
2845
|
+
var textDeltaChunkSchema = z.object({
|
|
2846
|
+
type: z.literal("response.output_text.delta"),
|
|
2847
|
+
item_id: z.string(),
|
|
2848
|
+
delta: z.string(),
|
|
2849
|
+
logprobs: LOGPROBS_SCHEMA.nullish()
|
|
2850
|
+
});
|
|
2851
|
+
var errorChunkSchema = z.object({
|
|
2852
|
+
type: z.literal("error"),
|
|
2853
|
+
code: z.string(),
|
|
2854
|
+
message: z.string(),
|
|
2855
|
+
param: z.string().nullish(),
|
|
2856
|
+
sequence_number: z.number()
|
|
2857
|
+
});
|
|
2858
|
+
var responseFinishedChunkSchema = z.object({
|
|
2859
|
+
type: z.enum(["response.completed", "response.incomplete"]),
|
|
2860
|
+
response: z.object({
|
|
2861
|
+
incomplete_details: z.object({ reason: z.string() }).nullish(),
|
|
2862
|
+
usage: usageSchema2
|
|
2863
|
+
})
|
|
2864
|
+
});
|
|
2865
|
+
var responseCreatedChunkSchema = z.object({
|
|
2866
|
+
type: z.literal("response.created"),
|
|
2867
|
+
response: z.object({
|
|
2868
|
+
id: z.string(),
|
|
2869
|
+
created_at: z.number(),
|
|
2870
|
+
model: z.string()
|
|
2871
|
+
})
|
|
2872
|
+
});
|
|
2873
|
+
var responseOutputItemAddedSchema = z.object({
|
|
2874
|
+
type: z.literal("response.output_item.added"),
|
|
2875
|
+
output_index: z.number(),
|
|
2876
|
+
item: z.discriminatedUnion("type", [
|
|
2877
|
+
z.object({
|
|
2878
|
+
type: z.literal("message"),
|
|
2879
|
+
id: z.string()
|
|
2880
|
+
}),
|
|
2881
|
+
z.object({
|
|
2882
|
+
type: z.literal("reasoning"),
|
|
2883
|
+
id: z.string(),
|
|
2884
|
+
encrypted_content: z.string().nullish()
|
|
2885
|
+
}),
|
|
2886
|
+
z.object({
|
|
2887
|
+
type: z.literal("function_call"),
|
|
2888
|
+
id: z.string(),
|
|
2889
|
+
call_id: z.string(),
|
|
2890
|
+
name: z.string(),
|
|
2891
|
+
arguments: z.string()
|
|
2892
|
+
}),
|
|
2893
|
+
z.object({
|
|
2894
|
+
type: z.literal("web_search_call"),
|
|
2895
|
+
id: z.string(),
|
|
2896
|
+
status: z.string(),
|
|
2897
|
+
action: z.object({
|
|
2898
|
+
type: z.literal("search"),
|
|
2899
|
+
query: z.string().optional()
|
|
2900
|
+
}).nullish()
|
|
2901
|
+
}),
|
|
2902
|
+
z.object({
|
|
2903
|
+
type: z.literal("computer_call"),
|
|
2904
|
+
id: z.string(),
|
|
2905
|
+
status: z.string()
|
|
2906
|
+
}),
|
|
2907
|
+
z.object({
|
|
2908
|
+
type: z.literal("file_search_call"),
|
|
2909
|
+
id: z.string(),
|
|
2910
|
+
status: z.string(),
|
|
2911
|
+
queries: z.array(z.string()).nullish(),
|
|
2912
|
+
results: z.array(
|
|
2913
|
+
z.object({
|
|
2914
|
+
attributes: z.object({
|
|
2915
|
+
file_id: z.string(),
|
|
2916
|
+
filename: z.string(),
|
|
2917
|
+
score: z.number(),
|
|
2918
|
+
text: z.string()
|
|
2919
|
+
})
|
|
2920
|
+
})
|
|
2921
|
+
).optional()
|
|
2922
|
+
})
|
|
2923
|
+
])
|
|
2924
|
+
});
|
|
2925
|
+
// Stream event emitted when an output item finishes. Mirrors
// responseOutputItemAddedSchema, but tool-call statuses are now pinned to the
// literal "completed", and function_call gains its final `status` field.
var responseOutputItemDoneSchema = z.object({
  type: z.literal("response.output_item.done"),
  output_index: z.number(),
  item: z.discriminatedUnion("type", [
    // Plain assistant message output.
    z.object({
      type: z.literal("message"),
      id: z.string()
    }),
    // Reasoning item; encrypted_content only present when requested.
    z.object({
      type: z.literal("reasoning"),
      id: z.string(),
      encrypted_content: z.string().nullish()
    }),
    // Finished tool call with the complete arguments JSON string.
    z.object({
      type: z.literal("function_call"),
      id: z.string(),
      call_id: z.string(),
      name: z.string(),
      arguments: z.string(),
      status: z.literal("completed")
    }),
    // Finished built-in web search invocation.
    z.object({
      type: z.literal("web_search_call"),
      id: z.string(),
      status: z.literal("completed"),
      action: z.object({
        type: z.literal("search"),
        query: z.string().optional()
      }).nullish()
    }),
    // Finished built-in computer-use invocation.
    z.object({
      type: z.literal("computer_call"),
      id: z.string(),
      status: z.literal("completed")
    }),
    // Finished built-in file search invocation; note `results` is `.nullish()`
    // here versus `.optional()` in the "added" schema.
    z.object({
      type: z.literal("file_search_call"),
      id: z.string(),
      status: z.literal("completed"),
      queries: z.array(z.string()).nullish(),
      results: z.array(
        z.object({
          attributes: z.object({
            file_id: z.string(),
            filename: z.string(),
            score: z.number(),
            text: z.string()
          })
        })
      ).nullish()
    })
  ])
});
|
|
2978
|
+
// Incremental tool-call arguments: `delta` is the next fragment of the
// arguments JSON string for the function_call item identified by `item_id`.
var responseFunctionCallArgumentsDeltaSchema = z.object({
  type: z.literal("response.function_call_arguments.delta"),
  item_id: z.string(),
  output_index: z.number(),
  delta: z.string()
});
|
|
2984
|
+
// Stream event attaching a citation to already-streamed output text. Two
// annotation shapes are supported, discriminated on `annotation.type`.
var responseAnnotationAddedSchema = z.object({
  type: z.literal("response.output_text.annotation.added"),
  annotation: z.discriminatedUnion("type", [
    // Citation of a web URL (e.g. produced by the web search tool).
    z.object({
      type: z.literal("url_citation"),
      url: z.string(),
      title: z.string()
    }),
    // Citation of an uploaded file (e.g. produced by file search).
    z.object({
      type: z.literal("file_citation"),
      file_id: z.string(),
      quote: z.string()
    })
  ])
});
|
|
2999
|
+
// Stream event marking the start of a new reasoning-summary part for the
// reasoning item `item_id`; the text itself arrives via the delta events.
var responseReasoningSummaryPartAddedSchema = z.object({
  type: z.literal("response.reasoning_summary_part.added"),
  item_id: z.string(),
  summary_index: z.number()
});
|
|
3004
|
+
// Incremental reasoning-summary text for (item_id, summary_index).
var responseReasoningSummaryTextDeltaSchema = z.object({
  type: z.literal("response.reasoning_summary_text.delta"),
  item_id: z.string(),
  summary_index: z.number(),
  delta: z.string()
});
|
|
3010
|
+
// Union of every stream chunk this integration understands. Member order
// matters: z.union tries branches in order, and the final loose object — which
// accepts any `{ type: string }` — must stay last so it only catches event
// types the specific schemas above do not match.
var openaiResponsesChunkSchema = z.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
  responseOutputItemAddedSchema,
  responseOutputItemDoneSchema,
  responseFunctionCallArgumentsDeltaSchema,
  responseAnnotationAddedSchema,
  responseReasoningSummaryPartAddedSchema,
  responseReasoningSummaryTextDeltaSchema,
  errorChunkSchema,
  z.object({ type: z.string() }).loose()
  // fallback for unknown chunks
]);
|
|
3024
|
+
/**
 * Narrow a parsed stream chunk to a text-delta event.
 * @param chunk - chunk parsed by `openaiResponsesChunkSchema`.
 * @returns true when the chunk carries an output-text delta.
 */
function isTextDeltaChunk(chunk) {
  const wanted = "response.output_text.delta";
  return wanted === chunk.type;
}
|
|
3027
|
+
/**
 * Narrow a parsed stream chunk to an output-item-done event.
 * @param chunk - chunk parsed by `openaiResponsesChunkSchema`.
 */
function isResponseOutputItemDoneChunk(chunk) {
  const wanted = "response.output_item.done";
  return wanted === chunk.type;
}
|
|
3030
|
+
/**
 * True when the chunk is an output-item-done event whose finished item is a
 * reasoning item. (The type check is inlined rather than delegated to
 * `isResponseOutputItemDoneChunk`; the result is identical.)
 * @param chunk - chunk parsed by `openaiResponsesChunkSchema`.
 */
function isResponseOutputItemDoneReasoningChunk(chunk) {
  if (chunk.type !== "response.output_item.done") {
    return false;
  }
  return chunk.item.type === "reasoning";
}
|
|
3033
|
+
/**
 * True when the stream has terminated, whether the response completed
 * normally or stopped early ("response.incomplete").
 * @param chunk - chunk parsed by `openaiResponsesChunkSchema`.
 */
function isResponseFinishedChunk(chunk) {
  return ["response.completed", "response.incomplete"].includes(chunk.type);
}
|
|
3036
|
+
/**
 * Narrow a parsed stream chunk to the initial response-created event.
 * @param chunk - chunk parsed by `openaiResponsesChunkSchema`.
 */
function isResponseCreatedChunk(chunk) {
  const wanted = "response.created";
  return wanted === chunk.type;
}
|
|
3039
|
+
/**
 * Narrow a parsed stream chunk to a tool-call-arguments delta event.
 * @param chunk - chunk parsed by `openaiResponsesChunkSchema`.
 */
function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
  const wanted = "response.function_call_arguments.delta";
  return wanted === chunk.type;
}
|
|
3042
|
+
/**
 * Narrow a parsed stream chunk to an output-item-added event.
 * @param chunk - chunk parsed by `openaiResponsesChunkSchema`.
 */
function isResponseOutputItemAddedChunk(chunk) {
  const wanted = "response.output_item.added";
  return wanted === chunk.type;
}
|
|
3045
|
+
/**
 * True when the chunk is an output-item-added event whose new item is a
 * reasoning item. (The type check is inlined rather than delegated to
 * `isResponseOutputItemAddedChunk`; the result is identical.)
 * @param chunk - chunk parsed by `openaiResponsesChunkSchema`.
 */
function isResponseOutputItemAddedReasoningChunk(chunk) {
  if (chunk.type !== "response.output_item.added") {
    return false;
  }
  return chunk.item.type === "reasoning";
}
|
|
3048
|
+
/**
 * Narrow a parsed stream chunk to a text-annotation (citation) event.
 * @param chunk - chunk parsed by `openaiResponsesChunkSchema`.
 */
function isResponseAnnotationAddedChunk(chunk) {
  const wanted = "response.output_text.annotation.added";
  return wanted === chunk.type;
}
|
|
3051
|
+
/**
 * Narrow a parsed stream chunk to a reasoning-summary-part-added event.
 * @param chunk - chunk parsed by `openaiResponsesChunkSchema`.
 */
function isResponseReasoningSummaryPartAddedChunk(chunk) {
  const wanted = "response.reasoning_summary_part.added";
  return wanted === chunk.type;
}
|
|
3054
|
+
/**
 * Narrow a parsed stream chunk to a reasoning-summary text delta event.
 * @param chunk - chunk parsed by `openaiResponsesChunkSchema`.
 */
function isResponseReasoningSummaryTextDeltaChunk(chunk) {
  const wanted = "response.reasoning_summary_text.delta";
  return wanted === chunk.type;
}
|
|
3057
|
+
/**
 * Narrow a parsed stream chunk to an error event.
 * @param chunk - chunk parsed by `openaiResponsesChunkSchema`.
 */
function isErrorChunk(chunk) {
  const { type } = chunk;
  return type === "error";
}
|
|
3060
|
+
/**
 * Derive per-model configuration for the OpenAI Responses API from a model id.
 *
 * @param modelId - the OpenAI model identifier (e.g. "gpt-4o", "o3-mini").
 * @returns an object with:
 *   - requiredAutoTruncation: always false here.
 *   - systemMessageMode: "system" | "developer" | "remove" — how system
 *     messages must be sent for this model family.
 *   - supportsFlexProcessing / supportsPriorityProcessing: service-tier
 *     eligibility derived from the model-id prefix.
 *   - isReasoningModel: whether the model is a reasoning model.
 */
function getResponsesModelConfig(modelId) {
  const startsWithAny = (...prefixes) => prefixes.some((prefix) => modelId.startsWith(prefix));

  // Flex tier: o3, o4-mini, and gpt-5 variants except gpt-5-chat.
  const flex = startsWithAny("o3", "o4-mini") || (startsWithAny("gpt-5") && !startsWithAny("gpt-5-chat"));
  // Priority tier: gpt-4*, gpt-5-mini, gpt-5 (minus nano/chat), o3, o4-mini.
  const priority = startsWithAny("gpt-4", "gpt-5-mini") || (startsWithAny("gpt-5") && !startsWithAny("gpt-5-nano") && !startsWithAny("gpt-5-chat")) || startsWithAny("o3", "o4-mini");

  const base = {
    requiredAutoTruncation: false,
    systemMessageMode: "system",
    supportsFlexProcessing: flex,
    supportsPriorityProcessing: priority
  };

  // gpt-5-chat is the one gpt-5 variant that is NOT a reasoning model, so it
  // must be ruled out before the generic gpt-5 prefix check below.
  if (startsWithAny("gpt-5-chat")) {
    return { ...base, isReasoningModel: false };
  }

  // Reasoning families: o-series, gpt-5, codex-, computer-use.
  if (startsWithAny("o", "gpt-5", "codex-", "computer-use")) {
    // The early o1 previews reject system/developer messages entirely.
    if (startsWithAny("o1-mini", "o1-preview")) {
      return { ...base, isReasoningModel: true, systemMessageMode: "remove" };
    }
    return { ...base, isReasoningModel: true, systemMessageMode: "developer" };
  }

  // Everything else (gpt-4 family, gpt-3.5, ...) is a plain chat model.
  return { ...base, isReasoningModel: false };
}
|
|
3094
|
+
// Provider-specific options accepted under `providerOptions.openai` for the
// Responses API. All fields are optional; `.nullish()` additionally permits
// explicit nulls. `TOP_LOGPROBS_MAX` is defined elsewhere in this file.
var openaiResponsesProviderOptionsSchema = z.object({
  metadata: z.any().nullish(),
  parallelToolCalls: z.boolean().nullish(),
  // Chain this request onto a previous response (server-side state).
  previousResponseId: z.string().nullish(),
  store: z.boolean().nullish(),
  user: z.string().nullish(),
  reasoningEffort: z.string().nullish(),
  strictJsonSchema: z.boolean().nullish(),
  instructions: z.string().nullish(),
  reasoningSummary: z.string().nullish(),
  serviceTier: z.enum(["auto", "flex", "priority"]).nullish(),
  // Extra payloads to request alongside the standard output (these feed the
  // optional fields in the output-item schemas above).
  include: z.array(
    z.enum([
      "reasoning.encrypted_content",
      "file_search_call.results",
      "message.output_text.logprobs"
    ])
  ).nullish(),
  textVerbosity: z.enum(["low", "medium", "high"]).nullish(),
  promptCacheKey: z.string().nullish(),
  safetyIdentifier: z.string().nullish(),
  /**
   * Return the log probabilities of the tokens.
   *
   * Setting to true will return the log probabilities of the tokens that
   * were generated.
   *
   * Setting to a number will return the log probabilities of the top n
   * tokens that were generated.
   *
   * @see https://platform.openai.com/docs/api-reference/responses/create
   * @see https://cookbook.openai.com/examples/using_logprobs
   */
  logprobs: z.union([z.boolean(), z.number().min(1).max(TOP_LOGPROBS_MAX)]).optional()
});
|
|
3129
|
+
// Provider options for the speech (TTS) model: optional voice-direction
// `instructions` and playback `speed` clamped to OpenAI's 0.25–4 range
// (default 1).
var OpenAIProviderOptionsSchema = z.object({
  instructions: z.string().nullish(),
  speed: z.number().min(0.25).max(4).default(1).nullish()
});
|
|
3133
|
+
// Speech (text-to-speech) model wrapper for OpenAI's /audio/speech endpoint.
// `config` supplies provider name, URL builder, header factory, and fetch —
// all injected by createOpenAI below.
var OpenAISpeechModel = class {
  constructor(modelId, config) {
    this.modelId = modelId;
    this.config = config;
    // Version of the speech-model specification this class implements.
    this.specificationVersion = "v2";
  }
  get provider() {
    return this.config.provider;
  }
  // Build the JSON request body and collect warnings for unsupported options.
  async getArgs({
    text,
    voice = "alloy",
    outputFormat = "mp3",
    speed,
    instructions,
    language,
    providerOptions
  }) {
    const warnings = [];
    // Validates providerOptions.openai against OpenAIProviderOptionsSchema.
    const openAIOptions = await parseProviderOptions({
      provider: "openai",
      providerOptions,
      schema: OpenAIProviderOptionsSchema
    });
    const requestBody = {
      model: this.modelId,
      input: text,
      voice,
      response_format: "mp3",
      speed,
      instructions
    };
    if (outputFormat) {
      // Only formats OpenAI accepts are forwarded; anything else falls back
      // to the mp3 default set above, with a warning.
      if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
        requestBody.response_format = outputFormat;
      } else {
        warnings.push({
          type: "unsupported-setting",
          setting: "outputFormat",
          details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
        });
      }
    }
    if (openAIOptions) {
      // NOTE(review): speechModelOptions is an empty object, so this loop is a
      // no-op — it looks like a codegen placeholder from upstream where extra
      // provider options would be copied into the request body. Harmless, but
      // verify against the upstream @ai-sdk/openai source before relying on it.
      const speechModelOptions = {};
      for (const key in speechModelOptions) {
        const value = speechModelOptions[key];
        if (value !== void 0) {
          requestBody[key] = value;
        }
      }
    }
    if (language) {
      // The speech endpoint has no language parameter; warn instead of failing.
      warnings.push({
        type: "unsupported-setting",
        setting: "language",
        details: `OpenAI speech models do not support language selection. Language parameter "${language}" was ignored.`
      });
    }
    return {
      requestBody,
      warnings
    };
  }
  // Perform the TTS request and return the binary audio plus metadata.
  async doGenerate(options) {
    var _a, _b, _c;
    // Transpiled `this.config._internal?.currentDate?.() ?? new Date()` —
    // _internal.currentDate is a test hook for deterministic timestamps.
    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
    const { requestBody, warnings } = await this.getArgs(options);
    const {
      value: audio,
      responseHeaders,
      rawValue: rawResponse
    } = await postJsonToApi({
      url: this.config.url({
        path: "/audio/speech",
        modelId: this.modelId
      }),
      headers: combineHeaders(this.config.headers(), options.headers),
      body: requestBody,
      failedResponseHandler: openaiFailedResponseHandler,
      // The endpoint returns raw audio bytes, not JSON.
      successfulResponseHandler: createBinaryResponseHandler(),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    return {
      audio,
      warnings,
      request: {
        body: JSON.stringify(requestBody)
      },
      response: {
        timestamp: currentDate,
        modelId: this.modelId,
        headers: responseHeaders,
        body: rawResponse
      }
    };
  }
};
|
|
3232
|
+
// Provider options accepted under `providerOptions.openai` for transcription
// (Whisper / gpt-4o-transcribe) requests.
var openAITranscriptionProviderOptions = z.object({
  /**
   * Additional information to include in the transcription response.
   */
  include: z.array(z.string()).optional(),
  /**
   * The language of the input audio in ISO-639-1 format.
   */
  language: z.string().optional(),
  /**
   * An optional text to guide the model's style or continue a previous audio segment.
   */
  prompt: z.string().optional(),
  /**
   * The sampling temperature, between 0 and 1.
   * @default 0
   */
  temperature: z.number().min(0).max(1).default(0).optional(),
  /**
   * The timestamp granularities to populate for this transcription.
   * @default ['segment']
   */
  timestampGranularities: z.array(z.enum(["word", "segment"])).default(["segment"]).optional()
});
|
|
3256
|
+
// Lowercase English language names → ISO-639-1 codes. Used by
// OpenAITranscriptionModel.doGenerate to normalize the `language` field of a
// verbose_json transcription response (which is a spelled-out name) into a
// two-letter code; unknown names map to undefined there.
var languageMap = {
  afrikaans: "af",
  arabic: "ar",
  armenian: "hy",
  azerbaijani: "az",
  belarusian: "be",
  bosnian: "bs",
  bulgarian: "bg",
  catalan: "ca",
  chinese: "zh",
  croatian: "hr",
  czech: "cs",
  danish: "da",
  dutch: "nl",
  english: "en",
  estonian: "et",
  finnish: "fi",
  french: "fr",
  galician: "gl",
  german: "de",
  greek: "el",
  hebrew: "he",
  hindi: "hi",
  hungarian: "hu",
  icelandic: "is",
  indonesian: "id",
  italian: "it",
  japanese: "ja",
  kannada: "kn",
  kazakh: "kk",
  korean: "ko",
  latvian: "lv",
  lithuanian: "lt",
  macedonian: "mk",
  malay: "ms",
  marathi: "mr",
  maori: "mi",
  nepali: "ne",
  norwegian: "no",
  persian: "fa",
  polish: "pl",
  portuguese: "pt",
  romanian: "ro",
  russian: "ru",
  serbian: "sr",
  slovak: "sk",
  slovenian: "sl",
  spanish: "es",
  swahili: "sw",
  swedish: "sv",
  tagalog: "tl",
  tamil: "ta",
  thai: "th",
  turkish: "tr",
  ukrainian: "uk",
  urdu: "ur",
  vietnamese: "vi",
  welsh: "cy"
};
|
|
3315
|
+
// Transcription model wrapper for OpenAI's /audio/transcriptions endpoint.
// `config` supplies provider name, URL builder, header factory, and fetch —
// all injected by createOpenAI below.
var OpenAITranscriptionModel = class {
  constructor(modelId, config) {
    this.modelId = modelId;
    this.config = config;
    // Version of the transcription-model specification this class implements.
    this.specificationVersion = "v2";
  }
  get provider() {
    return this.config.provider;
  }
  // Build the multipart form body; `audio` may be raw bytes or base64 text.
  async getArgs({
    audio,
    mediaType,
    providerOptions
  }) {
    const warnings = [];
    // Validates providerOptions.openai against openAITranscriptionProviderOptions.
    const openAIOptions = await parseProviderOptions({
      provider: "openai",
      providerOptions,
      schema: openAITranscriptionProviderOptions
    });
    const formData = new FormData();
    const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([convertBase64ToUint8Array(audio)]);
    formData.append("model", this.modelId);
    formData.append("file", new File([blob], "audio", { type: mediaType }));
    if (openAIOptions) {
      const transcriptionModelOptions = {
        include: openAIOptions.include,
        language: openAIOptions.language,
        prompt: openAIOptions.prompt,
        response_format: "verbose_json",
        // always use verbose_json to get segments
        temperature: openAIOptions.temperature,
        timestamp_granularities: openAIOptions.timestampGranularities
      };
      // NOTE(review): String(value) turns array options (include,
      // timestamp_granularities) into a comma-joined string such as
      // "word,segment" — presumably what the endpoint expects; verify
      // against the OpenAI API reference.
      for (const [key, value] of Object.entries(transcriptionModelOptions)) {
        if (value != null) {
          formData.append(key, String(value));
        }
      }
    }
    return {
      formData,
      warnings
    };
  }
  // Perform the transcription request and normalize the verbose_json response.
  async doGenerate(options) {
    var _a, _b, _c, _d, _e, _f, _g, _h;
    // Transpiled `this.config._internal?.currentDate?.() ?? new Date()` —
    // _internal.currentDate is a test hook for deterministic timestamps.
    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
    const { formData, warnings } = await this.getArgs(options);
    const {
      value: response,
      responseHeaders,
      rawValue: rawResponse
    } = await postFormDataToApi({
      url: this.config.url({
        path: "/audio/transcriptions",
        modelId: this.modelId
      }),
      headers: combineHeaders(this.config.headers(), options.headers),
      formData,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: createJsonResponseHandler(
        openaiTranscriptionResponseSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    // Map the spelled-out language name (e.g. "english") to an ISO-639-1
    // code via languageMap; unknown or missing names become undefined.
    const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
    return {
      text: response.text,
      // Prefer segment timestamps, fall back to word timestamps, else [].
      // (Transpiled `response.segments?.map(...) ?? response.words?.map(...) ?? []`.)
      segments: (_g = (_f = (_d = response.segments) == null ? void 0 : _d.map((segment) => ({
        text: segment.text,
        startSecond: segment.start,
        endSecond: segment.end
      }))) != null ? _f : (_e = response.words) == null ? void 0 : _e.map((word) => ({
        text: word.word,
        startSecond: word.start,
        endSecond: word.end
      }))) != null ? _g : [],
      language,
      durationInSeconds: (_h = response.duration) != null ? _h : void 0,
      warnings,
      response: {
        timestamp: currentDate,
        modelId: this.modelId,
        headers: responseHeaders,
        body: rawResponse
      }
    };
  }
};
|
|
3406
|
+
// Shape of a verbose_json transcription response. Only `text` is guaranteed;
// `words`/`segments` depend on the requested timestamp granularities, and
// `language` is a spelled-out name (normalized via languageMap above).
var openaiTranscriptionResponseSchema = z.object({
  text: z.string(),
  language: z.string().nullish(),
  duration: z.number().nullish(),
  // Per-word timing, present when "word" granularity was requested.
  words: z.array(
    z.object({
      word: z.string(),
      start: z.number(),
      end: z.number()
    })
  ).nullish(),
  // Per-segment timing and decode diagnostics, present when "segment"
  // granularity was requested (the default).
  segments: z.array(
    z.object({
      id: z.number(),
      seek: z.number(),
      start: z.number(),
      end: z.number(),
      text: z.string(),
      tokens: z.array(z.number()),
      temperature: z.number(),
      avg_logprob: z.number(),
      compression_ratio: z.number(),
      no_speech_prob: z.number()
    })
  ).nullish()
});
|
|
3432
|
+
/**
 * Create an OpenAI provider: a callable that maps a model id to a Responses
 * language model, with factory methods attached for every other model kind
 * (chat, completion, embedding, image, transcription, speech).
 *
 * @param options - optional overrides: baseURL, apiKey (defaults to the
 *   OPENAI_API_KEY env var, resolved lazily), name, organization, project,
 *   headers, fetch.
 * @returns the provider function with its factory properties.
 *
 * Fix vs. previous revision: the `new.target` guard used to live inside the
 * arrow function `createLanguageModel`. Arrow functions do not get their own
 * `new.target` — it resolves lexically to the enclosing `createOpenAI` call,
 * which is undefined in normal use — so `new provider("...")` was never
 * rejected. The guard now lives in the `provider` function expression itself,
 * where `new.target` is set when callers (incorrectly) use `new`.
 */
function createOpenAI(options = {}) {
  var _a, _b;
  const baseURL = (_a = withoutTrailingSlash(options.baseURL)) != null ? _a : "https://api.openai.com/v1";
  const providerName = (_b = options.name) != null ? _b : "openai";
  // Headers are built per request so the API key is read lazily.
  const getHeaders = () => ({
    Authorization: `Bearer ${loadApiKey({
      apiKey: options.apiKey,
      environmentVariableName: "OPENAI_API_KEY",
      description: "OpenAI"
    })}`,
    "OpenAI-Organization": options.organization,
    "OpenAI-Project": options.project,
    ...options.headers
  });
  // Each factory tags its models with a `provider` of "<name>.<kind>".
  const createChatModel = (modelId) => new OpenAIChatLanguageModel(modelId, {
    provider: `${providerName}.chat`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    fetch: options.fetch
  });
  const createCompletionModel = (modelId) => new OpenAICompletionLanguageModel(modelId, {
    provider: `${providerName}.completion`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    fetch: options.fetch
  });
  const createEmbeddingModel = (modelId) => new OpenAIEmbeddingModel(modelId, {
    provider: `${providerName}.embedding`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    fetch: options.fetch
  });
  const createImageModel = (modelId) => new OpenAIImageModel(modelId, {
    provider: `${providerName}.image`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    fetch: options.fetch
  });
  const createTranscriptionModel = (modelId) => new OpenAITranscriptionModel(modelId, {
    provider: `${providerName}.transcription`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    fetch: options.fetch
  });
  const createSpeechModel = (modelId) => new OpenAISpeechModel(modelId, {
    provider: `${providerName}.speech`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    fetch: options.fetch
  });
  // The default language model is the Responses API model.
  const createLanguageModel = (modelId) => {
    return createResponsesModel(modelId);
  };
  const createResponsesModel = (modelId) => {
    return new OpenAIResponsesLanguageModel(modelId, {
      provider: `${providerName}.responses`,
      url: ({ path }) => `${baseURL}${path}`,
      headers: getHeaders,
      fetch: options.fetch,
      fileIdPrefixes: ["file-"]
    });
  };
  const provider = function(modelId) {
    // `new.target` is defined here (a plain function expression), so misuse
    // like `new openai("gpt-4o")` is rejected instead of silently ignored.
    if (new.target) {
      throw new Error(
        "The OpenAI model function cannot be called with the new keyword."
      );
    }
    return createLanguageModel(modelId);
  };
  provider.languageModel = createLanguageModel;
  provider.chat = createChatModel;
  provider.completion = createCompletionModel;
  provider.responses = createResponsesModel;
  provider.embedding = createEmbeddingModel;
  provider.textEmbedding = createEmbeddingModel;
  provider.textEmbeddingModel = createEmbeddingModel;
  provider.image = createImageModel;
  provider.imageModel = createImageModel;
  provider.transcription = createTranscriptionModel;
  provider.transcriptionModel = createTranscriptionModel;
  provider.speech = createSpeechModel;
  provider.speechModel = createSpeechModel;
  provider.tools = openaiTools;
  return provider;
}
|
|
3518
|
+
// Default provider instance with no overrides: resolves OPENAI_API_KEY from
// the environment lazily, on first model creation.
var openai = createOpenAI();

export { createOpenAI, openai };
//# sourceMappingURL=chunk-4RRMWXQ2.js.map
|