@ai-sdk/openai 2.0.0-canary.15 → 2.0.0-canary.17
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +21 -0
- package/README.md +2 -2
- package/dist/index.d.mts +1 -8
- package/dist/index.d.ts +1 -8
- package/dist/index.js +88 -86
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +88 -86
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +15 -17
- package/dist/internal/index.d.ts +15 -17
- package/dist/internal/index.js +85 -77
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +85 -77
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +4 -4
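
Taken together, the changes below standardize token accounting: `totalTokens`, `reasoningTokens`, and `cachedInputTokens` move onto the shared `usage` result, while `reasoningTokens`/`cachedPromptTokens` disappear from `providerMetadata.openai`. A minimal sketch of reading the new fields, assuming the AI SDK v5 canary `generateText` surface (model id and prompt are placeholders):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { usage, providerMetadata } = await generateText({
  model: openai('gpt-4o'),
  prompt: 'Summarize this diff in one line.',
});

// As of canary.17 these are reported directly on `usage`
// (reasoning/cached counts previously lived in providerMetadata.openai;
// they are undefined when the API does not return the detail fields):
console.log(usage.inputTokens, usage.outputTokens, usage.totalTokens);
console.log(usage.reasoningTokens, usage.cachedInputTokens);

// Prediction-token counts remain provider-specific metadata:
console.log(providerMetadata?.openai?.acceptedPredictionTokens);
```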
package/dist/internal/index.mjs
CHANGED
@@ -529,7 +529,7 @@ var OpenAIChatLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
     const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -567,18 +567,12 @@ var OpenAIChatLanguageModel = class {
     const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
     const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
     const providerMetadata = { openai: {} };
-    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
-      providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
-    }
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
       providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
     }
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
       providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
     }
-    if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
-      providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
-    }
     if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
       providerMetadata.openai.logprobs = choice.logprobs.content;
     }
@@ -587,7 +581,10 @@ var OpenAIChatLanguageModel = class {
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       usage: {
         inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
-        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0
+        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
+        totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
+        reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
+        cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
       },
       request: { body },
       response: {
@@ -604,8 +601,9 @@ var OpenAIChatLanguageModel = class {
     const body = {
       ...args,
       stream: true,
-
-
+      stream_options: {
+        include_usage: true
+      }
     };
     const { responseHeaders, value: response } = await postJsonToApi({
       url: this.config.url({
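
The hunk above hard-codes `stream_options.include_usage` into every chat streaming request (the two removed lines are elided in this diff), so the final SSE chunk always reports usage. An illustrative sketch of the resulting request body; field names are from OpenAI's Chat Completions API, model and messages are placeholders:

```ts
// Shape of the body the provider now posts for doStream (illustrative):
const body = {
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'Hi' }],
  stream: true,
  stream_options: {
    // OpenAI then emits one final chunk with `usage` set and empty `choices`.
    include_usage: true,
  },
};
```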
@@ -621,12 +619,12 @@ var OpenAIChatLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { messages: rawPrompt, ...rawSettings } = args;
     const toolCalls = [];
     let finishReason = "unknown";
     const usage = {
       inputTokens: void 0,
-      outputTokens: void 0
+      outputTokens: void 0,
+      totalTokens: void 0
     };
     let isFirstChunk = true;
     const providerMetadata = { openai: {} };
@@ -637,7 +635,7 @@ var OpenAIChatLanguageModel = class {
          controller.enqueue({ type: "stream-start", warnings });
        },
        transform(chunk, controller) {
-          var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+          var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
          if (!chunk.success) {
            finishReason = "error";
            controller.enqueue({ type: "error", error: chunk.error });
@@ -657,31 +655,25 @@ var OpenAIChatLanguageModel = class {
            });
          }
          if (value.usage != null) {
-            const {
-              prompt_tokens,
-              completion_tokens,
-              prompt_tokens_details,
-              completion_tokens_details
-            } = value.usage;
-            usage.inputTokens = prompt_tokens != null ? prompt_tokens : void 0;
-            usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
-            if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
-              providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
-            }
-            if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
-              providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
+            usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+            usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
+            usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
+            usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
+            usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
+            if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
+              providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
            }
-            if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
-              providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
-            }
-            if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
-              providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
+            if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
+              providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
            }
          }
          const choice = value.choices[0];
          if ((choice == null ? void 0 : choice.finish_reason) != null) {
            finishReason = mapOpenAIFinishReason(choice.finish_reason);
          }
+          if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
+            providerMetadata.openai.logprobs = choice.logprobs.content;
+          }
          if ((choice == null ? void 0 : choice.delta) == null) {
            return;
          }
@@ -708,7 +700,7 @@ var OpenAIChatLanguageModel = class {
                  message: `Expected 'id' to be a string.`
                });
              }
-              if (((
+              if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
                throw new InvalidResponseDataError({
                  data: toolCallDelta,
                  message: `Expected 'function.name' to be a string.`
@@ -719,12 +711,12 @@ var OpenAIChatLanguageModel = class {
                type: "function",
                function: {
                  name: toolCallDelta.function.name,
-                  arguments: (
+                  arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
                },
                hasFinished: false
              };
              const toolCall2 = toolCalls[index];
-              if (((
+              if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
                if (toolCall2.function.arguments.length > 0) {
                  controller.enqueue({
                    type: "tool-call-delta",
@@ -738,7 +730,7 @@ var OpenAIChatLanguageModel = class {
                  controller.enqueue({
                    type: "tool-call",
                    toolCallType: "function",
-                    toolCallId: (
+                    toolCallId: (_q = toolCall2.id) != null ? _q : generateId(),
                    toolName: toolCall2.function.name,
                    args: toolCall2.function.arguments
                  });
@@ -751,21 +743,21 @@ var OpenAIChatLanguageModel = class {
            if (toolCall.hasFinished) {
              continue;
            }
-            if (((
-              toolCall.function.arguments += (
+            if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
+              toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
            }
            controller.enqueue({
              type: "tool-call-delta",
              toolCallType: "function",
              toolCallId: toolCall.id,
              toolName: toolCall.function.name,
-              argsTextDelta: (
+              argsTextDelta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
            });
-            if (((
+            if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && isParsableJson(toolCall.function.arguments)) {
              controller.enqueue({
                type: "tool-call",
                toolCallType: "function",
-                toolCallId: (
+                toolCallId: (_x = toolCall.id) != null ? _x : generateId(),
                toolName: toolCall.function.name,
                args: toolCall.function.arguments
              });
@@ -792,6 +784,7 @@ var OpenAIChatLanguageModel = class {
 var openaiTokenUsageSchema = z3.object({
   prompt_tokens: z3.number().nullish(),
   completion_tokens: z3.number().nullish(),
+  total_tokens: z3.number().nullish(),
   prompt_tokens_details: z3.object({
     cached_tokens: z3.number().nullish()
   }).nullish(),
@@ -863,6 +856,20 @@ var openaiChatChunkSchema = z3.union([
          })
        ).nullish()
      }).nullish(),
+      logprobs: z3.object({
+        content: z3.array(
+          z3.object({
+            token: z3.string(),
+            logprob: z3.number(),
+            top_logprobs: z3.array(
+              z3.object({
+                token: z3.string(),
+                logprob: z3.number()
+              })
+            )
+          })
+        ).nullish()
+      }).nullish(),
      finish_reason: z3.string().nullish(),
      index: z3.number()
    })
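
With `logprobs` added to the streaming chunk schema above, streamed choices that carry log probabilities now parse instead of being discarded, and the transform copies them into `providerMetadata.openai.logprobs`. An example choice fragment the extended schema accepts (values are made up):

```ts
// A streamed choice fragment that now validates against openaiChatChunkSchema:
const chunkChoice = {
  index: 0,
  delta: { role: 'assistant', content: 'Hello' },
  logprobs: {
    content: [
      {
        token: 'Hello',
        logprob: -0.012,
        top_logprobs: [{ token: 'Hello', logprob: -0.012 }],
      },
    ],
  },
  finish_reason: null,
};
```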
@@ -1133,6 +1140,7 @@ var OpenAICompletionLanguageModel = class {
     };
   }
   async doGenerate(options) {
+    var _a, _b, _c;
     const { args, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -1160,8 +1168,9 @@ var OpenAICompletionLanguageModel = class {
     return {
       content: [{ type: "text", text: choice.text }],
       usage: {
-        inputTokens: response.usage.prompt_tokens,
-        outputTokens: response.usage.completion_tokens
+        inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
+        outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
+        totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
       },
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       request: { body: args },
@@ -1179,8 +1188,9 @@ var OpenAICompletionLanguageModel = class {
     const body = {
       ...args,
       stream: true,
-
-
+      stream_options: {
+        include_usage: true
+      }
     };
     const { responseHeaders, value: response } = await postJsonToApi2({
       url: this.config.url({
@@ -1200,7 +1210,8 @@ var OpenAICompletionLanguageModel = class {
     const providerMetadata = { openai: {} };
     const usage = {
       inputTokens: void 0,
-      outputTokens: void 0
+      outputTokens: void 0,
+      totalTokens: void 0
     };
     let isFirstChunk = true;
     return {
@@ -1231,6 +1242,7 @@ var OpenAICompletionLanguageModel = class {
          if (value.usage != null) {
            usage.inputTokens = value.usage.prompt_tokens;
            usage.outputTokens = value.usage.completion_tokens;
+            usage.totalTokens = value.usage.total_tokens;
          }
          const choice = value.choices[0];
          if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1261,6 +1273,11 @@ var OpenAICompletionLanguageModel = class {
     };
   }
 };
+var usageSchema = z5.object({
+  prompt_tokens: z5.number(),
+  completion_tokens: z5.number(),
+  total_tokens: z5.number()
+});
 var openaiCompletionResponseSchema = z5.object({
   id: z5.string().nullish(),
   created: z5.number().nullish(),
@@ -1276,10 +1293,7 @@ var openaiCompletionResponseSchema = z5.object({
      }).nullish()
    })
  ),
-  usage: z5.object({
-    prompt_tokens: z5.number(),
-    completion_tokens: z5.number()
-  })
+  usage: usageSchema.nullish()
});
var openaiCompletionChunkSchema = z5.union([
  z5.object({
@@ -1298,10 +1312,7 @@ var openaiCompletionChunkSchema = z5.union([
      }).nullish()
    })
  ),
-  usage: z5.object({
-    prompt_tokens: z5.number(),
-    completion_tokens: z5.number()
-  }).nullish()
+  usage: usageSchema.nullish()
  }),
  openaiErrorDataSchema
]);
@@ -1518,25 +1529,25 @@ var openAITranscriptionProviderOptions = z9.object({
   /**
    * Additional information to include in the transcription response.
    */
-  include: z9.array(z9.string()).
+  include: z9.array(z9.string()).optional(),
   /**
    * The language of the input audio in ISO-639-1 format.
    */
-  language: z9.string().
+  language: z9.string().optional(),
   /**
    * An optional text to guide the model's style or continue a previous audio segment.
    */
-  prompt: z9.string().
+  prompt: z9.string().optional(),
   /**
    * The sampling temperature, between 0 and 1.
    * @default 0
    */
-  temperature: z9.number().min(0).max(1).default(0).
+  temperature: z9.number().min(0).max(1).default(0).optional(),
   /**
    * The timestamp granularities to populate for this transcription.
    * @default ['segment']
   */
-  timestampGranularities: z9.array(z9.enum(["word", "segment"])).default(["segment"]).
+  timestampGranularities: z9.array(z9.enum(["word", "segment"])).default(["segment"]).optional()
 });

 // src/openai-transcription-model.ts
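
The hunk above marks every transcription provider option `.optional()`, so any of them may simply be omitted. A sketch of passing a subset, assuming the canary `experimental_transcribe` surface in the `ai` package and a local audio file (names are illustrative):

```ts
import { experimental_transcribe as transcribe } from 'ai';
import { openai } from '@ai-sdk/openai';
import { readFile } from 'node:fs/promises';

const { text } = await transcribe({
  model: openai.transcription('whisper-1'),
  audio: await readFile('meeting.wav'),
  providerOptions: {
    openai: {
      // All fields are optional; anything unspecified falls back to API defaults.
      language: 'en',
      temperature: 0,
      timestampGranularities: ['word'],
    },
  },
});
```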
@@ -2222,7 +2233,7 @@ var OpenAIResponsesLanguageModel = class {
        ])
      ),
      incomplete_details: z12.object({ reason: z12.string() }).nullable(),
-      usage:
+      usage: usageSchema2
    })
  ),
  abortSignal: options.abortSignal,
@@ -2276,7 +2287,10 @@ var OpenAIResponsesLanguageModel = class {
      }),
      usage: {
        inputTokens: response.usage.input_tokens,
-        outputTokens: response.usage.output_tokens
+        outputTokens: response.usage.output_tokens,
+        totalTokens: response.usage.input_tokens + response.usage.output_tokens,
+        reasoningTokens: (_f = (_e = response.usage.output_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : void 0,
+        cachedInputTokens: (_h = (_g = response.usage.input_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : void 0
      },
      request: { body },
      response: {
@@ -2288,9 +2302,7 @@ var OpenAIResponsesLanguageModel = class {
      },
      providerMetadata: {
        openai: {
-          responseId: response.id,
-          cachedPromptTokens: (_f = (_e = response.usage.input_tokens_details) == null ? void 0 : _e.cached_tokens) != null ? _f : null,
-          reasoningTokens: (_h = (_g = response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : null
+          responseId: response.id
        }
      },
      warnings
@@ -2319,10 +2331,9 @@ var OpenAIResponsesLanguageModel = class {
    let finishReason = "unknown";
    const usage = {
      inputTokens: void 0,
-      outputTokens: void 0
+      outputTokens: void 0,
+      totalTokens: void 0
    };
-    let cachedPromptTokens = null;
-    let reasoningTokens = null;
    let responseId = null;
    const ongoingToolCalls = {};
    let hasToolCalls = false;
@@ -2400,8 +2411,9 @@ var OpenAIResponsesLanguageModel = class {
            });
            usage.inputTokens = value.response.usage.input_tokens;
            usage.outputTokens = value.response.usage.output_tokens;
-
-            reasoningTokens = (
+            usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
+            usage.reasoningTokens = (_c = (_b = value.response.usage.output_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0;
+            usage.cachedInputTokens = (_e = (_d = value.response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : void 0;
          } else if (isResponseAnnotationAddedChunk(value)) {
            controller.enqueue({
              type: "source",
@@ -2417,13 +2429,9 @@ var OpenAIResponsesLanguageModel = class {
              type: "finish",
              finishReason,
              usage,
-
-
-
-              responseId,
-              cachedPromptTokens,
-              reasoningTokens
-              }
+              providerMetadata: {
+                openai: {
+                  responseId
              }
            }
          });
@@ -2435,7 +2443,7 @@ var OpenAIResponsesLanguageModel = class {
    };
  }
};
-var
+var usageSchema2 = z12.object({
  input_tokens: z12.number(),
  input_tokens_details: z12.object({ cached_tokens: z12.number().nullish() }).nullish(),
  output_tokens: z12.number(),
@@ -2449,7 +2457,7 @@ var responseFinishedChunkSchema = z12.object({
  type: z12.enum(["response.completed", "response.incomplete"]),
  response: z12.object({
    incomplete_details: z12.object({ reason: z12.string() }).nullish(),
-    usage:
+    usage: usageSchema2
  })
});
var responseCreatedChunkSchema = z12.object({