@ai-sdk/openai 3.0.0-beta.88 → 3.0.0-beta.90
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +16 -0
- package/dist/index.js +154 -78
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +154 -78
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js +153 -77
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +153 -77
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/internal/index.mjs
CHANGED
@@ -47,6 +47,45 @@ function getOpenAILanguageModelCapabilities(modelId) {
   };
 }
 
+// src/chat/convert-openai-chat-usage.ts
+function convertOpenAIChatUsage(usage) {
+  var _a, _b, _c, _d, _e, _f;
+  if (usage == null) {
+    return {
+      inputTokens: {
+        total: void 0,
+        noCache: void 0,
+        cacheRead: void 0,
+        cacheWrite: void 0
+      },
+      outputTokens: {
+        total: void 0,
+        text: void 0,
+        reasoning: void 0
+      },
+      raw: void 0
+    };
+  }
+  const promptTokens = (_a = usage.prompt_tokens) != null ? _a : 0;
+  const completionTokens = (_b = usage.completion_tokens) != null ? _b : 0;
+  const cachedTokens = (_d = (_c = usage.prompt_tokens_details) == null ? void 0 : _c.cached_tokens) != null ? _d : 0;
+  const reasoningTokens = (_f = (_e = usage.completion_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : 0;
+  return {
+    inputTokens: {
+      total: promptTokens,
+      noCache: promptTokens - cachedTokens,
+      cacheRead: cachedTokens,
+      cacheWrite: void 0
+    },
+    outputTokens: {
+      total: completionTokens,
+      text: completionTokens - reasoningTokens,
+      reasoning: reasoningTokens
+    },
+    raw: usage
+  };
+}
+
 // src/chat/convert-to-openai-chat-messages.ts
 import {
   UnsupportedFunctionalityError
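The new convertOpenAIChatUsage helper normalizes OpenAI's raw usage payload into the SDK's nested usage shape, deriving the noCache and text buckets by subtraction. A minimal sketch of the mapping, with payload values invented for illustration:

// Hypothetical chat-completions usage payload:
const usage = {
  prompt_tokens: 120,
  completion_tokens: 80,
  prompt_tokens_details: { cached_tokens: 100 },
  completion_tokens_details: { reasoning_tokens: 30 }
};
convertOpenAIChatUsage(usage);
// => {
//   inputTokens: { total: 120, noCache: 20, cacheRead: 100, cacheWrite: undefined },
//   outputTokens: { total: 80, text: 50, reasoning: 30 },
//   raw: usage
// }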
@@ -757,7 +796,7 @@ var OpenAIChatLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f
+    var _a, _b, _c, _d, _e, _f;
     const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -815,13 +854,7 @@ var OpenAIChatLanguageModel = class {
     return {
       content,
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
-      usage: {
-        inputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.prompt_tokens) != null ? _h : void 0,
-        outputTokens: (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens) != null ? _j : void 0,
-        totalTokens: (_l = (_k = response.usage) == null ? void 0 : _k.total_tokens) != null ? _l : void 0,
-        reasoningTokens: (_m = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _m : void 0,
-        cachedInputTokens: (_n = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _n : void 0
-      },
+      usage: convertOpenAIChatUsage(response.usage),
       request: { body },
       response: {
         ...getResponseMetadata(response),
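This changes the shape of usage that doGenerate returns: beta.88's flat counters become a nested structure. Roughly, with the same invented numbers as above:

// beta.88 (flat):
//   { inputTokens: 120, outputTokens: 80, totalTokens: 200,
//     reasoningTokens: 30, cachedInputTokens: 100 }
// beta.90 (nested, via convertOpenAIChatUsage):
//   { inputTokens: { total: 120, noCache: 20, cacheRead: 100, cacheWrite: undefined },
//     outputTokens: { total: 80, text: 50, reasoning: 30 },
//     raw: <provider usage payload> }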
@@ -857,11 +890,7 @@ var OpenAIChatLanguageModel = class {
     });
     const toolCalls = [];
     let finishReason = "unknown";
-    let usage = {
-      inputTokens: void 0,
-      outputTokens: void 0,
-      totalTokens: void 0
-    };
+    let usage = void 0;
     let metadataExtracted = false;
     let isActiveText = false;
     const providerMetadata = { openai: {} };
@@ -872,7 +901,7 @@ var OpenAIChatLanguageModel = class {
         controller.enqueue({ type: "stream-start", warnings });
       },
       transform(chunk, controller) {
-        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q
+        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q;
         if (options.includeRawChunks) {
           controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
         }
@@ -898,23 +927,19 @@ var OpenAIChatLanguageModel = class {
           }
         }
         if (value.usage != null) {
-          usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
-          usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
-          usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
-          usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
-          usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
-          if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
-            providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
+          usage = value.usage;
+          if (((_a = value.usage.completion_tokens_details) == null ? void 0 : _a.accepted_prediction_tokens) != null) {
+            providerMetadata.openai.acceptedPredictionTokens = (_b = value.usage.completion_tokens_details) == null ? void 0 : _b.accepted_prediction_tokens;
           }
-          if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
-            providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
+          if (((_c = value.usage.completion_tokens_details) == null ? void 0 : _c.rejected_prediction_tokens) != null) {
+            providerMetadata.openai.rejectedPredictionTokens = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.rejected_prediction_tokens;
           }
         }
         const choice = value.choices[0];
         if ((choice == null ? void 0 : choice.finish_reason) != null) {
           finishReason = mapOpenAIFinishReason(choice.finish_reason);
         }
-        if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
+        if (((_e = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _e.content) != null) {
           providerMetadata.openai.logprobs = choice.logprobs.content;
         }
         if ((choice == null ? void 0 : choice.delta) == null) {
@@ -948,7 +973,7 @@ var OpenAIChatLanguageModel = class {
                 message: `Expected 'id' to be a string.`
               });
             }
-            if (((
+            if (((_f = toolCallDelta.function) == null ? void 0 : _f.name) == null) {
               throw new InvalidResponseDataError({
                 data: toolCallDelta,
                 message: `Expected 'function.name' to be a string.`
@@ -964,12 +989,12 @@ var OpenAIChatLanguageModel = class {
               type: "function",
               function: {
                 name: toolCallDelta.function.name,
-                arguments: (
+                arguments: (_g = toolCallDelta.function.arguments) != null ? _g : ""
               },
               hasFinished: false
             };
             const toolCall2 = toolCalls[index];
-            if (((
+            if (((_h = toolCall2.function) == null ? void 0 : _h.name) != null && ((_i = toolCall2.function) == null ? void 0 : _i.arguments) != null) {
               if (toolCall2.function.arguments.length > 0) {
                 controller.enqueue({
                   type: "tool-input-delta",
@@ -984,7 +1009,7 @@ var OpenAIChatLanguageModel = class {
                 });
                 controller.enqueue({
                   type: "tool-call",
-                  toolCallId: (
+                  toolCallId: (_j = toolCall2.id) != null ? _j : generateId(),
                   toolName: toolCall2.function.name,
                   input: toolCall2.function.arguments
                 });
@@ -997,22 +1022,22 @@ var OpenAIChatLanguageModel = class {
             if (toolCall.hasFinished) {
               continue;
             }
-            if (((
-              toolCall.function.arguments += (
+            if (((_k = toolCallDelta.function) == null ? void 0 : _k.arguments) != null) {
+              toolCall.function.arguments += (_m = (_l = toolCallDelta.function) == null ? void 0 : _l.arguments) != null ? _m : "";
             }
             controller.enqueue({
               type: "tool-input-delta",
               id: toolCall.id,
-              delta: (
+              delta: (_n = toolCallDelta.function.arguments) != null ? _n : ""
             });
-            if (((
+            if (((_o = toolCall.function) == null ? void 0 : _o.name) != null && ((_p = toolCall.function) == null ? void 0 : _p.arguments) != null && isParsableJson(toolCall.function.arguments)) {
               controller.enqueue({
                 type: "tool-input-end",
                 id: toolCall.id
               });
               controller.enqueue({
                 type: "tool-call",
-                toolCallId: (
+                toolCallId: (_q = toolCall.id) != null ? _q : generateId(),
                 toolName: toolCall.function.name,
                 input: toolCall.function.arguments
               });
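The tool-call branches above change only renamed minified locals; the behavior stays: argument deltas accumulate per tool call, and the final tool-call event fires only once the accumulated string parses as JSON. A condensed sketch of that gate, using a JSON.parse stand-in for the SDK's isParsableJson:

function tryFinishToolCall(toolCall) {
  // stand-in for isParsableJson from @ai-sdk/provider-utils:
  const isParsableJson = (s) => { try { JSON.parse(s); return true; } catch { return false; } };
  if (toolCall.function.name != null && toolCall.function.arguments != null && isParsableJson(toolCall.function.arguments)) {
    return { type: "tool-call", toolName: toolCall.function.name, input: toolCall.function.arguments };
  }
  return null; // not complete yet; keep accumulating deltas
}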
@@ -1039,7 +1064,7 @@ var OpenAIChatLanguageModel = class {
          controller.enqueue({
            type: "finish",
            finishReason,
-            usage,
+            usage: convertOpenAIChatUsage(usage),
            ...providerMetadata != null ? { providerMetadata } : {}
          });
        }
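All three streaming paths in this release adopt the same pattern as doGenerate: instead of mutating a pre-shaped usage object chunk by chunk, the transform keeps the provider's latest raw usage payload and converts it once when emitting the finish event. A condensed, self-contained sketch (chunk payloads invented):

function collectUsage(chunks) {
  let usage = void 0; // raw OpenAI payload, not the SDK shape
  for (const value of chunks) {
    if (value.usage != null) {
      usage = value.usage; // keep only the latest payload
    }
  }
  return convertOpenAIChatUsage(usage); // single conversion at finish
}
collectUsage([{}, { usage: { prompt_tokens: 10, completion_tokens: 5 } }]);
// => inputTokens.total: 10, outputTokens.total: 5, raw: { prompt_tokens: 10, completion_tokens: 5 }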
@@ -1060,6 +1085,43 @@ import {
   postJsonToApi as postJsonToApi2
 } from "@ai-sdk/provider-utils";
 
+// src/completion/convert-openai-completion-usage.ts
+function convertOpenAICompletionUsage(usage) {
+  var _a, _b, _c, _d;
+  if (usage == null) {
+    return {
+      inputTokens: {
+        total: void 0,
+        noCache: void 0,
+        cacheRead: void 0,
+        cacheWrite: void 0
+      },
+      outputTokens: {
+        total: void 0,
+        text: void 0,
+        reasoning: void 0
+      },
+      raw: void 0
+    };
+  }
+  const promptTokens = (_a = usage.prompt_tokens) != null ? _a : 0;
+  const completionTokens = (_b = usage.completion_tokens) != null ? _b : 0;
+  return {
+    inputTokens: {
+      total: (_c = usage.prompt_tokens) != null ? _c : void 0,
+      noCache: promptTokens,
+      cacheRead: void 0,
+      cacheWrite: void 0
+    },
+    outputTokens: {
+      total: (_d = usage.completion_tokens) != null ? _d : void 0,
+      text: completionTokens,
+      reasoning: void 0
+    },
+    raw: usage
+  };
+}
+
 // src/completion/convert-to-openai-completion-prompt.ts
 import {
   InvalidPromptError,
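Unlike the chat variant, the legacy completions API reports no cache or reasoning breakdown, so this converter pins noCache to the full prompt count and leaves cacheRead and reasoning undefined:

convertOpenAICompletionUsage({ prompt_tokens: 7, completion_tokens: 3 });
// => {
//   inputTokens: { total: 7, noCache: 7, cacheRead: undefined, cacheWrite: undefined },
//   outputTokens: { total: 3, text: 3, reasoning: undefined },
//   raw: { prompt_tokens: 7, completion_tokens: 3 }
// }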
@@ -1362,7 +1424,6 @@ var OpenAICompletionLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c;
     const { args, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -1389,11 +1450,7 @@ var OpenAICompletionLanguageModel = class {
     }
     return {
       content: [{ type: "text", text: choice.text }],
-      usage: {
-        inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
-        outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
-        totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
-      },
+      usage: convertOpenAICompletionUsage(response.usage),
       finishReason: mapOpenAIFinishReason2(choice.finish_reason),
       request: { body: args },
       response: {
@@ -1430,11 +1487,7 @@ var OpenAICompletionLanguageModel = class {
     });
     let finishReason = "unknown";
     const providerMetadata = { openai: {} };
-    let usage = {
-      inputTokens: void 0,
-      outputTokens: void 0,
-      totalTokens: void 0
-    };
+    let usage = void 0;
     let isFirstChunk = true;
     return {
       stream: response.pipeThrough(
@@ -1466,9 +1519,7 @@ var OpenAICompletionLanguageModel = class {
            controller.enqueue({ type: "text-start", id: "0" });
          }
          if (value.usage != null) {
-            usage.inputTokens = value.usage.prompt_tokens;
-            usage.outputTokens = value.usage.completion_tokens;
-            usage.totalTokens = value.usage.total_tokens;
+            usage = value.usage;
          }
          const choice = value.choices[0];
          if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1493,7 +1544,7 @@ var OpenAICompletionLanguageModel = class {
              type: "finish",
              finishReason,
              providerMetadata,
-              usage
+              usage: convertOpenAICompletionUsage(usage)
            });
          }
        })
@@ -2129,6 +2180,45 @@ import {
   postJsonToApi as postJsonToApi6
 } from "@ai-sdk/provider-utils";
 
+// src/responses/convert-openai-responses-usage.ts
+function convertOpenAIResponsesUsage(usage) {
+  var _a, _b, _c, _d;
+  if (usage == null) {
+    return {
+      inputTokens: {
+        total: void 0,
+        noCache: void 0,
+        cacheRead: void 0,
+        cacheWrite: void 0
+      },
+      outputTokens: {
+        total: void 0,
+        text: void 0,
+        reasoning: void 0
+      },
+      raw: void 0
+    };
+  }
+  const inputTokens = usage.input_tokens;
+  const outputTokens = usage.output_tokens;
+  const cachedTokens = (_b = (_a = usage.input_tokens_details) == null ? void 0 : _a.cached_tokens) != null ? _b : 0;
+  const reasoningTokens = (_d = (_c = usage.output_tokens_details) == null ? void 0 : _c.reasoning_tokens) != null ? _d : 0;
+  return {
+    inputTokens: {
+      total: inputTokens,
+      noCache: inputTokens - cachedTokens,
+      cacheRead: cachedTokens,
+      cacheWrite: void 0
+    },
+    outputTokens: {
+      total: outputTokens,
+      text: outputTokens - reasoningTokens,
+      reasoning: reasoningTokens
+    },
+    raw: usage
+  };
+}
+
 // src/responses/convert-to-openai-responses-input.ts
 import {
   UnsupportedFunctionalityError as UnsupportedFunctionalityError4
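The Responses API counterpart reads input_tokens/output_tokens instead of the prompt/completion naming. Note that the old doGenerate computed totalTokens as input_tokens + output_tokens; the new shape carries no combined total, only per-direction totals. With invented values:

convertOpenAIResponsesUsage({
  input_tokens: 200,
  output_tokens: 60,
  input_tokens_details: { cached_tokens: 150 },
  output_tokens_details: { reasoning_tokens: 40 }
});
// => {
//   inputTokens: { total: 200, noCache: 50, cacheRead: 150, cacheWrite: undefined },
//   outputTokens: { total: 60, text: 20, reasoning: 40 },
//   raw: { ...the payload above... }
// }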
@@ -4255,7 +4345,7 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
     const {
       args: body,
       warnings,
@@ -4656,13 +4746,7 @@ var OpenAIResponsesLanguageModel = class {
        finishReason: (_x = response.incomplete_details) == null ? void 0 : _x.reason,
        hasFunctionCall
      }),
-      usage: {
-        inputTokens: usage.input_tokens,
-        outputTokens: usage.output_tokens,
-        totalTokens: usage.input_tokens + usage.output_tokens,
-        reasoningTokens: (_z = (_y = usage.output_tokens_details) == null ? void 0 : _y.reasoning_tokens) != null ? _z : void 0,
-        cachedInputTokens: (_B = (_A = usage.input_tokens_details) == null ? void 0 : _A.cached_tokens) != null ? _B : void 0
-      },
+      usage: convertOpenAIResponsesUsage(usage),
      request: { body },
      response: {
        id: response.id,
@@ -4703,11 +4787,7 @@ var OpenAIResponsesLanguageModel = class {
    const self = this;
    const providerKey = this.config.provider.replace(".responses", "");
    let finishReason = "unknown";
-    let usage = {
-      inputTokens: void 0,
-      outputTokens: void 0,
-      totalTokens: void 0
-    };
+    let usage = void 0;
    const logprobs = [];
    let responseId = null;
    const ongoingToolCalls = {};
@@ -4722,7 +4802,7 @@ var OpenAIResponsesLanguageModel = class {
        controller.enqueue({ type: "stream-start", warnings });
      },
      transform(chunk, controller) {
-        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A
+        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A;
        if (options.includeRawChunks) {
          controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
        }
@@ -5230,11 +5310,7 @@ var OpenAIResponsesLanguageModel = class {
            finishReason: (_i = value.response.incomplete_details) == null ? void 0 : _i.reason,
            hasFunctionCall
          });
-          usage.inputTokens = value.response.usage.input_tokens;
-          usage.outputTokens = value.response.usage.output_tokens;
-          usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
-          usage.reasoningTokens = (_k = (_j = value.response.usage.output_tokens_details) == null ? void 0 : _j.reasoning_tokens) != null ? _k : void 0;
-          usage.cachedInputTokens = (_m = (_l = value.response.usage.input_tokens_details) == null ? void 0 : _l.cached_tokens) != null ? _m : void 0;
+          usage = value.response.usage;
          if (typeof value.response.service_tier === "string") {
            serviceTier = value.response.service_tier;
          }
@@ -5244,7 +5320,7 @@ var OpenAIResponsesLanguageModel = class {
          controller.enqueue({
            type: "source",
            sourceType: "url",
-            id: (
+            id: (_l = (_k = (_j = self.config).generateId) == null ? void 0 : _k.call(_j)) != null ? _l : generateId2(),
            url: value.annotation.url,
            title: value.annotation.title
          });
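The rebuilt id expressions in this and the next three hunks are esbuild's down-leveling of an optional call plus nullish coalescing; in source form the pattern reads approximately (a sketch, not the package's verbatim TypeScript):

// Prefer a caller-supplied id generator from the provider config,
// falling back to the SDK default otherwise:
const id = self.config.generateId?.() ?? generateId2();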
@@ -5252,10 +5328,10 @@ var OpenAIResponsesLanguageModel = class {
          controller.enqueue({
            type: "source",
            sourceType: "document",
-            id: (
+            id: (_o = (_n = (_m = self.config).generateId) == null ? void 0 : _n.call(_m)) != null ? _o : generateId2(),
            mediaType: "text/plain",
-            title: (
-            filename: (
+            title: (_q = (_p = value.annotation.quote) != null ? _p : value.annotation.filename) != null ? _q : "Document",
+            filename: (_r = value.annotation.filename) != null ? _r : value.annotation.file_id,
            ...value.annotation.file_id ? {
              providerMetadata: {
                [providerKey]: {
@@ -5268,10 +5344,10 @@ var OpenAIResponsesLanguageModel = class {
          controller.enqueue({
            type: "source",
            sourceType: "document",
-            id: (
+            id: (_u = (_t = (_s = self.config).generateId) == null ? void 0 : _t.call(_s)) != null ? _u : generateId2(),
            mediaType: "text/plain",
-            title: (
-            filename: (
+            title: (_w = (_v = value.annotation.filename) != null ? _v : value.annotation.file_id) != null ? _w : "Document",
+            filename: (_x = value.annotation.filename) != null ? _x : value.annotation.file_id,
            providerMetadata: {
              [providerKey]: {
                fileId: value.annotation.file_id,
@@ -5284,7 +5360,7 @@ var OpenAIResponsesLanguageModel = class {
          controller.enqueue({
            type: "source",
            sourceType: "document",
-            id: (
+            id: (_A = (_z = (_y = self.config).generateId) == null ? void 0 : _z.call(_y)) != null ? _A : generateId2(),
            mediaType: "application/octet-stream",
            title: value.annotation.file_id,
            filename: value.annotation.file_id,
@@ -5315,7 +5391,7 @@ var OpenAIResponsesLanguageModel = class {
          controller.enqueue({
            type: "finish",
            finishReason,
-            usage,
+            usage: convertOpenAIResponsesUsage(usage),
            providerMetadata
          });
        }