@ai-sdk/openai 3.0.0-beta.88 → 3.0.0-beta.90
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +16 -0
- package/dist/index.js +154 -78
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +154 -78
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js +153 -77
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +153 -77
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/index.mjs
CHANGED
@@ -55,6 +55,45 @@ function getOpenAILanguageModelCapabilities(modelId) {
   };
 }
 
+// src/chat/convert-openai-chat-usage.ts
+function convertOpenAIChatUsage(usage) {
+  var _a, _b, _c, _d, _e, _f;
+  if (usage == null) {
+    return {
+      inputTokens: {
+        total: void 0,
+        noCache: void 0,
+        cacheRead: void 0,
+        cacheWrite: void 0
+      },
+      outputTokens: {
+        total: void 0,
+        text: void 0,
+        reasoning: void 0
+      },
+      raw: void 0
+    };
+  }
+  const promptTokens = (_a = usage.prompt_tokens) != null ? _a : 0;
+  const completionTokens = (_b = usage.completion_tokens) != null ? _b : 0;
+  const cachedTokens = (_d = (_c = usage.prompt_tokens_details) == null ? void 0 : _c.cached_tokens) != null ? _d : 0;
+  const reasoningTokens = (_f = (_e = usage.completion_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : 0;
+  return {
+    inputTokens: {
+      total: promptTokens,
+      noCache: promptTokens - cachedTokens,
+      cacheRead: cachedTokens,
+      cacheWrite: void 0
+    },
+    outputTokens: {
+      total: completionTokens,
+      text: completionTokens - reasoningTokens,
+      reasoning: reasoningTokens
+    },
+    raw: usage
+  };
+}
+
 // src/chat/convert-to-openai-chat-messages.ts
 import {
   UnsupportedFunctionalityError
@@ -765,7 +804,7 @@ var OpenAIChatLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
+    var _a, _b, _c, _d, _e, _f;
     const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -823,13 +862,7 @@ var OpenAIChatLanguageModel = class {
     return {
       content,
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
-      usage: {
-        inputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.prompt_tokens) != null ? _h : void 0,
-        outputTokens: (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens) != null ? _j : void 0,
-        totalTokens: (_l = (_k = response.usage) == null ? void 0 : _k.total_tokens) != null ? _l : void 0,
-        reasoningTokens: (_m = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _m : void 0,
-        cachedInputTokens: (_n = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _n : void 0
-      },
+      usage: convertOpenAIChatUsage(response.usage),
       request: { body },
       response: {
         ...getResponseMetadata(response),
@@ -865,11 +898,7 @@ var OpenAIChatLanguageModel = class {
     });
     const toolCalls = [];
     let finishReason = "unknown";
-    let usage = {
-      inputTokens: void 0,
-      outputTokens: void 0,
-      totalTokens: void 0
-    };
+    let usage = void 0;
     let metadataExtracted = false;
     let isActiveText = false;
     const providerMetadata = { openai: {} };
@@ -880,7 +909,7 @@ var OpenAIChatLanguageModel = class {
             controller.enqueue({ type: "stream-start", warnings });
           },
           transform(chunk, controller) {
-            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
+            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q;
             if (options.includeRawChunks) {
               controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
             }
@@ -906,23 +935,19 @@ var OpenAIChatLanguageModel = class {
               }
             }
             if (value.usage != null) {
-              usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
-              usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
-              usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
-              usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
-              usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
-              if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
-                providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
+              usage = value.usage;
+              if (((_a = value.usage.completion_tokens_details) == null ? void 0 : _a.accepted_prediction_tokens) != null) {
+                providerMetadata.openai.acceptedPredictionTokens = (_b = value.usage.completion_tokens_details) == null ? void 0 : _b.accepted_prediction_tokens;
               }
-              if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
-                providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
+              if (((_c = value.usage.completion_tokens_details) == null ? void 0 : _c.rejected_prediction_tokens) != null) {
+                providerMetadata.openai.rejectedPredictionTokens = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.rejected_prediction_tokens;
               }
             }
             const choice = value.choices[0];
             if ((choice == null ? void 0 : choice.finish_reason) != null) {
               finishReason = mapOpenAIFinishReason(choice.finish_reason);
             }
-            if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
+            if (((_e = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _e.content) != null) {
               providerMetadata.openai.logprobs = choice.logprobs.content;
             }
             if ((choice == null ? void 0 : choice.delta) == null) {
@@ -956,7 +981,7 @@ var OpenAIChatLanguageModel = class {
                   message: `Expected 'id' to be a string.`
                 });
               }
-              if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
+              if (((_f = toolCallDelta.function) == null ? void 0 : _f.name) == null) {
                 throw new InvalidResponseDataError({
                   data: toolCallDelta,
                   message: `Expected 'function.name' to be a string.`
@@ -972,12 +997,12 @@ var OpenAIChatLanguageModel = class {
                 type: "function",
                 function: {
                   name: toolCallDelta.function.name,
-                  arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
+                  arguments: (_g = toolCallDelta.function.arguments) != null ? _g : ""
                 },
                 hasFinished: false
               };
               const toolCall2 = toolCalls[index];
-              if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
+              if (((_h = toolCall2.function) == null ? void 0 : _h.name) != null && ((_i = toolCall2.function) == null ? void 0 : _i.arguments) != null) {
                 if (toolCall2.function.arguments.length > 0) {
                   controller.enqueue({
                     type: "tool-input-delta",
@@ -992,7 +1017,7 @@ var OpenAIChatLanguageModel = class {
                   });
                   controller.enqueue({
                     type: "tool-call",
-                    toolCallId: (_q = toolCall2.id) != null ? _q : generateId(),
+                    toolCallId: (_j = toolCall2.id) != null ? _j : generateId(),
                     toolName: toolCall2.function.name,
                     input: toolCall2.function.arguments
                   });
@@ -1005,22 +1030,22 @@ var OpenAIChatLanguageModel = class {
               if (toolCall.hasFinished) {
                 continue;
               }
-              if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
-                toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
+              if (((_k = toolCallDelta.function) == null ? void 0 : _k.arguments) != null) {
+                toolCall.function.arguments += (_m = (_l = toolCallDelta.function) == null ? void 0 : _l.arguments) != null ? _m : "";
               }
               controller.enqueue({
                 type: "tool-input-delta",
                 id: toolCall.id,
-                delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
+                delta: (_n = toolCallDelta.function.arguments) != null ? _n : ""
               });
-              if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && isParsableJson(toolCall.function.arguments)) {
+              if (((_o = toolCall.function) == null ? void 0 : _o.name) != null && ((_p = toolCall.function) == null ? void 0 : _p.arguments) != null && isParsableJson(toolCall.function.arguments)) {
                 controller.enqueue({
                   type: "tool-input-end",
                   id: toolCall.id
                 });
                 controller.enqueue({
                   type: "tool-call",
-                  toolCallId: (_x = toolCall.id) != null ? _x : generateId(),
+                  toolCallId: (_q = toolCall.id) != null ? _q : generateId(),
                   toolName: toolCall.function.name,
                   input: toolCall.function.arguments
                 });
@@ -1047,7 +1072,7 @@ var OpenAIChatLanguageModel = class {
             controller.enqueue({
               type: "finish",
               finishReason,
-              usage,
+              usage: convertOpenAIChatUsage(usage),
               ...providerMetadata != null ? { providerMetadata } : {}
             });
           }
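
The streaming path changes accordingly: instead of mutating a pre-built usage object field by field on every usage chunk, the transform now just stores the raw provider payload and normalizes it once in the `finish` event. A simplified sketch of the pattern (not the verbatim source):

// Simplified sketch of the new accumulation pattern:
let usage = undefined;                    // raw OpenAI usage payload, if any

// in transform():
if (value.usage != null) {
  usage = value.usage;                    // keep the raw payload; no per-field copying
}

// in flush():
controller.enqueue({
  type: "finish",
  finishReason,
  usage: convertOpenAIChatUsage(usage)    // normalize once; undefined input still
});                                       // yields a well-formed all-undefined object
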
@@ -1068,6 +1093,43 @@ import {
   postJsonToApi as postJsonToApi2
 } from "@ai-sdk/provider-utils";
 
+// src/completion/convert-openai-completion-usage.ts
+function convertOpenAICompletionUsage(usage) {
+  var _a, _b, _c, _d;
+  if (usage == null) {
+    return {
+      inputTokens: {
+        total: void 0,
+        noCache: void 0,
+        cacheRead: void 0,
+        cacheWrite: void 0
+      },
+      outputTokens: {
+        total: void 0,
+        text: void 0,
+        reasoning: void 0
+      },
+      raw: void 0
+    };
+  }
+  const promptTokens = (_a = usage.prompt_tokens) != null ? _a : 0;
+  const completionTokens = (_b = usage.completion_tokens) != null ? _b : 0;
+  return {
+    inputTokens: {
+      total: (_c = usage.prompt_tokens) != null ? _c : void 0,
+      noCache: promptTokens,
+      cacheRead: void 0,
+      cacheWrite: void 0
+    },
+    outputTokens: {
+      total: (_d = usage.completion_tokens) != null ? _d : void 0,
+      text: completionTokens,
+      reasoning: void 0
+    },
+    raw: usage
+  };
+}
+
 // src/completion/convert-to-openai-completion-prompt.ts
 import {
   InvalidPromptError,
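
Unlike the chat variant, the legacy completions API reports no cache or reasoning detail, so the converter maps `noCache` to the full prompt count and leaves `cacheRead` and `reasoning` unset. For example (a sketch; the function is internal, and the payload below is hypothetical):

const usage = { prompt_tokens: 10, completion_tokens: 50, total_tokens: 60 };

convertOpenAICompletionUsage(usage);
// => {
//   inputTokens:  { total: 10, noCache: 10, cacheRead: undefined, cacheWrite: undefined },
//   outputTokens: { total: 50, text: 50, reasoning: undefined },
//   raw: usage
// }
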
@@ -1370,7 +1432,6 @@ var OpenAICompletionLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c;
     const { args, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -1397,11 +1458,7 @@ var OpenAICompletionLanguageModel = class {
     }
     return {
       content: [{ type: "text", text: choice.text }],
-      usage: {
-        inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
-        outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
-        totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
-      },
+      usage: convertOpenAICompletionUsage(response.usage),
       finishReason: mapOpenAIFinishReason2(choice.finish_reason),
       request: { body: args },
       response: {
@@ -1438,11 +1495,7 @@ var OpenAICompletionLanguageModel = class {
     });
     let finishReason = "unknown";
     const providerMetadata = { openai: {} };
-    let usage = {
-      inputTokens: void 0,
-      outputTokens: void 0,
-      totalTokens: void 0
-    };
+    let usage = void 0;
     let isFirstChunk = true;
     return {
       stream: response.pipeThrough(
@@ -1474,9 +1527,7 @@ var OpenAICompletionLanguageModel = class {
               controller.enqueue({ type: "text-start", id: "0" });
             }
             if (value.usage != null) {
-              usage.inputTokens = value.usage.prompt_tokens;
-              usage.outputTokens = value.usage.completion_tokens;
-              usage.totalTokens = value.usage.total_tokens;
+              usage = value.usage;
             }
             const choice = value.choices[0];
             if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1501,7 +1552,7 @@ var OpenAICompletionLanguageModel = class {
               type: "finish",
               finishReason,
               providerMetadata,
-              usage
+              usage: convertOpenAICompletionUsage(usage)
             });
           }
         })
@@ -2327,6 +2378,45 @@ import {
   postJsonToApi as postJsonToApi5
 } from "@ai-sdk/provider-utils";
 
+// src/responses/convert-openai-responses-usage.ts
+function convertOpenAIResponsesUsage(usage) {
+  var _a, _b, _c, _d;
+  if (usage == null) {
+    return {
+      inputTokens: {
+        total: void 0,
+        noCache: void 0,
+        cacheRead: void 0,
+        cacheWrite: void 0
+      },
+      outputTokens: {
+        total: void 0,
+        text: void 0,
+        reasoning: void 0
+      },
+      raw: void 0
+    };
+  }
+  const inputTokens = usage.input_tokens;
+  const outputTokens = usage.output_tokens;
+  const cachedTokens = (_b = (_a = usage.input_tokens_details) == null ? void 0 : _a.cached_tokens) != null ? _b : 0;
+  const reasoningTokens = (_d = (_c = usage.output_tokens_details) == null ? void 0 : _c.reasoning_tokens) != null ? _d : 0;
+  return {
+    inputTokens: {
+      total: inputTokens,
+      noCache: inputTokens - cachedTokens,
+      cacheRead: cachedTokens,
+      cacheWrite: void 0
+    },
+    outputTokens: {
+      total: outputTokens,
+      text: outputTokens - reasoningTokens,
+      reasoning: reasoningTokens
+    },
+    raw: usage
+  };
+}
+
 // src/responses/convert-to-openai-responses-input.ts
 import {
   UnsupportedFunctionalityError as UnsupportedFunctionalityError4
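
The Responses API names its fields `input_tokens`/`output_tokens` and nests cache and reasoning counts under `*_details`, so the normalized result mirrors the chat case. Note that the old inline mapping also computed a combined `totalTokens` (`input_tokens + output_tokens`); the normalized shape instead carries per-direction totals. For example (a sketch; the function is internal, and the payload below is hypothetical):

const usage = {
  input_tokens: 120,
  output_tokens: 80,
  input_tokens_details: { cached_tokens: 100 },
  output_tokens_details: { reasoning_tokens: 30 }
};

convertOpenAIResponsesUsage(usage);
// => {
//   inputTokens:  { total: 120, noCache: 20, cacheRead: 100, cacheWrite: undefined },
//   outputTokens: { total: 80, text: 50, reasoning: 30 },
//   raw: usage
// }
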
@@ -3995,7 +4085,7 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
     const {
       args: body,
       warnings,
@@ -4396,13 +4486,7 @@ var OpenAIResponsesLanguageModel = class {
         finishReason: (_x = response.incomplete_details) == null ? void 0 : _x.reason,
         hasFunctionCall
       }),
-      usage: {
-        inputTokens: usage.input_tokens,
-        outputTokens: usage.output_tokens,
-        totalTokens: usage.input_tokens + usage.output_tokens,
-        reasoningTokens: (_z = (_y = usage.output_tokens_details) == null ? void 0 : _y.reasoning_tokens) != null ? _z : void 0,
-        cachedInputTokens: (_B = (_A = usage.input_tokens_details) == null ? void 0 : _A.cached_tokens) != null ? _B : void 0
-      },
+      usage: convertOpenAIResponsesUsage(usage),
       request: { body },
       response: {
         id: response.id,
@@ -4443,11 +4527,7 @@ var OpenAIResponsesLanguageModel = class {
     const self = this;
     const providerKey = this.config.provider.replace(".responses", "");
     let finishReason = "unknown";
-    let usage = {
-      inputTokens: void 0,
-      outputTokens: void 0,
-      totalTokens: void 0
-    };
+    let usage = void 0;
     const logprobs = [];
     let responseId = null;
     const ongoingToolCalls = {};
@@ -4462,7 +4542,7 @@ var OpenAIResponsesLanguageModel = class {
             controller.enqueue({ type: "stream-start", warnings });
           },
           transform(chunk, controller) {
-            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B, _C, _D, _E;
+            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A;
             if (options.includeRawChunks) {
               controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
             }
@@ -4970,11 +5050,7 @@ var OpenAIResponsesLanguageModel = class {
                 finishReason: (_i = value.response.incomplete_details) == null ? void 0 : _i.reason,
                 hasFunctionCall
               });
-              usage.inputTokens = value.response.usage.input_tokens;
-              usage.outputTokens = value.response.usage.output_tokens;
-              usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
-              usage.reasoningTokens = (_k = (_j = value.response.usage.output_tokens_details) == null ? void 0 : _j.reasoning_tokens) != null ? _k : void 0;
-              usage.cachedInputTokens = (_m = (_l = value.response.usage.input_tokens_details) == null ? void 0 : _l.cached_tokens) != null ? _m : void 0;
+              usage = value.response.usage;
               if (typeof value.response.service_tier === "string") {
                 serviceTier = value.response.service_tier;
               }
@@ -4984,7 +5060,7 @@ var OpenAIResponsesLanguageModel = class {
               controller.enqueue({
                 type: "source",
                 sourceType: "url",
-                id: (_p = (_o = (_n = self.config).generateId) == null ? void 0 : _o.call(_n)) != null ? _p : generateId2(),
+                id: (_l = (_k = (_j = self.config).generateId) == null ? void 0 : _k.call(_j)) != null ? _l : generateId2(),
                 url: value.annotation.url,
                 title: value.annotation.title
               });
@@ -4992,10 +5068,10 @@ var OpenAIResponsesLanguageModel = class {
               controller.enqueue({
                 type: "source",
                 sourceType: "document",
-                id: (_s = (_r = (_q = self.config).generateId) == null ? void 0 : _r.call(_q)) != null ? _s : generateId2(),
+                id: (_o = (_n = (_m = self.config).generateId) == null ? void 0 : _n.call(_m)) != null ? _o : generateId2(),
                 mediaType: "text/plain",
-                title: (_u = (_t = value.annotation.quote) != null ? _t : value.annotation.filename) != null ? _u : "Document",
-                filename: (_v = value.annotation.filename) != null ? _v : value.annotation.file_id,
+                title: (_q = (_p = value.annotation.quote) != null ? _p : value.annotation.filename) != null ? _q : "Document",
+                filename: (_r = value.annotation.filename) != null ? _r : value.annotation.file_id,
                 ...value.annotation.file_id ? {
                   providerMetadata: {
                     [providerKey]: {
@@ -5008,10 +5084,10 @@ var OpenAIResponsesLanguageModel = class {
               controller.enqueue({
                 type: "source",
                 sourceType: "document",
-                id: (_y = (_x = (_w = self.config).generateId) == null ? void 0 : _x.call(_w)) != null ? _y : generateId2(),
+                id: (_u = (_t = (_s = self.config).generateId) == null ? void 0 : _t.call(_s)) != null ? _u : generateId2(),
                 mediaType: "text/plain",
-                title: (_A = (_z = value.annotation.filename) != null ? _z : value.annotation.file_id) != null ? _A : "Document",
-                filename: (_B = value.annotation.filename) != null ? _B : value.annotation.file_id,
+                title: (_w = (_v = value.annotation.filename) != null ? _v : value.annotation.file_id) != null ? _w : "Document",
+                filename: (_x = value.annotation.filename) != null ? _x : value.annotation.file_id,
                 providerMetadata: {
                   [providerKey]: {
                     fileId: value.annotation.file_id,
@@ -5024,7 +5100,7 @@ var OpenAIResponsesLanguageModel = class {
               controller.enqueue({
                 type: "source",
                 sourceType: "document",
-                id: (_E = (_D = (_C = self.config).generateId) == null ? void 0 : _D.call(_C)) != null ? _E : generateId2(),
+                id: (_A = (_z = (_y = self.config).generateId) == null ? void 0 : _z.call(_y)) != null ? _A : generateId2(),
                 mediaType: "application/octet-stream",
                 title: value.annotation.file_id,
                 filename: value.annotation.file_id,
@@ -5055,7 +5131,7 @@ var OpenAIResponsesLanguageModel = class {
             controller.enqueue({
               type: "finish",
               finishReason,
-              usage,
+              usage: convertOpenAIResponsesUsage(usage),
               providerMetadata
             });
           }
@@ -5482,7 +5558,7 @@ var OpenAITranscriptionModel = class {
 };
 
 // src/version.ts
-var VERSION = true ? "3.0.0-beta.88" : "0.0.0-test";
+var VERSION = true ? "3.0.0-beta.90" : "0.0.0-test";
 
 // src/openai-provider.ts
 function createOpenAI(options = {}) {