@ai-sdk/openai 2.0.0-canary.15 → 2.0.0-canary.16
This diff compares the publicly released contents of the two package versions as published to their registry. It is provided for informational purposes only.
- package/CHANGELOG.md +13 -0
- package/README.md +2 -2
- package/dist/index.js +79 -73
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +79 -73
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +15 -15
- package/dist/internal/index.d.ts +15 -15
- package/dist/internal/index.js +79 -73
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +79 -73
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +4 -4
package/dist/index.mjs CHANGED

@@ -535,7 +535,7 @@ var OpenAIChatLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
     const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -573,18 +573,12 @@ var OpenAIChatLanguageModel = class {
     const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
     const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
     const providerMetadata = { openai: {} };
-    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
-      providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
-    }
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
       providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
     }
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
       providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
     }
-    if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
-      providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
-    }
     if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
       providerMetadata.openai.logprobs = choice.logprobs.content;
     }
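The `_a`–`_m` temporaries above are esbuild's down-leveled form of optional chaining and nullish coalescing, not hand-written code. A source-level sketch of the same guard pattern (type and function names here are illustrative, not from the package):

// Illustrative only: source-level equivalent of the bundled
// `(_c = response.usage) == null ? void 0 : _c.completion_tokens_details` guards.
type Usage = {
  completion_tokens_details?: { reasoning_tokens?: number | null } | null;
};

function reasoningTokensOf(usage: Usage | null | undefined): number | undefined {
  // `?.` and `??` compile down to the `== null ? void 0 :` chains seen above
  return usage?.completion_tokens_details?.reasoning_tokens ?? undefined;
}

console.log(reasoningTokensOf(undefined)); // undefined, rather than a TypeError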
@@ -593,7 +587,10 @@ var OpenAIChatLanguageModel = class {
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       usage: {
         inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
-        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0
+        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
+        totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
+        reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
+        cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
       },
       request: { body },
       response: {
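Net effect of the two hunks above: reasoning and cached-token counts move off `providerMetadata.openai` (canary.15's `reasoningTokens`/`cachedPromptTokens`) and onto the standard `usage` object, alongside the newly reported `totalTokens`. A minimal consumer sketch, assuming the AI SDK v5 canary `generateText` API and an `OPENAI_API_KEY` in the environment:

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { usage, providerMetadata } = await generateText({
  model: openai('gpt-4o'),
  prompt: 'Hello!',
});

console.log(usage.totalTokens);        // now read from the API's total_tokens
console.log(usage.reasoningTokens);    // was providerMetadata.openai.reasoningTokens
console.log(usage.cachedInputTokens);  // was providerMetadata.openai.cachedPromptTokens

// Prediction-token counts remain provider-specific metadata:
console.log(providerMetadata?.openai?.acceptedPredictionTokens);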
@@ -627,12 +624,12 @@ var OpenAIChatLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { messages: rawPrompt, ...rawSettings } = args;
     const toolCalls = [];
     let finishReason = "unknown";
     const usage = {
       inputTokens: void 0,
-      outputTokens: void 0
+      outputTokens: void 0,
+      totalTokens: void 0
     };
     let isFirstChunk = true;
     const providerMetadata = { openai: {} };
@@ -643,7 +640,7 @@ var OpenAIChatLanguageModel = class {
         controller.enqueue({ type: "stream-start", warnings });
       },
       transform(chunk, controller) {
-        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
         if (!chunk.success) {
           finishReason = "error";
           controller.enqueue({ type: "error", error: chunk.error });
@@ -663,31 +660,25 @@ var OpenAIChatLanguageModel = class {
           });
         }
         if (value.usage != null) {
-
-
-
-
-
-
-
-          usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
-          if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
-            providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
-          }
-          if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
-            providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
-          }
-          if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
-            providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
+          usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+          usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
+          usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
+          usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
+          usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
+          if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
+            providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
           }
-          if ((
-          providerMetadata.openai.
+          if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
+            providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
           }
         }
         const choice = value.choices[0];
         if ((choice == null ? void 0 : choice.finish_reason) != null) {
           finishReason = mapOpenAIFinishReason(choice.finish_reason);
         }
+        if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
+          providerMetadata.openai.logprobs = choice.logprobs.content;
+        }
         if ((choice == null ? void 0 : choice.delta) == null) {
           return;
         }
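The streaming path now fills the same `usage` fields as chunks arrive and forwards per-choice logprobs to `providerMetadata.openai.logprobs`. A reading sketch, again assuming the v5 canary `streamText` API:

import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = streamText({
  model: openai('gpt-4o'),
  prompt: 'Write a haiku.',
});

for await (const text of result.textStream) {
  process.stdout.write(text);
}

// Resolves once the final usage chunk has been processed; shape as of this
// version: { inputTokens, outputTokens, totalTokens, reasoningTokens?, cachedInputTokens? }
console.log(await result.usage);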
@@ -714,7 +705,7 @@ var OpenAIChatLanguageModel = class {
                 message: `Expected 'id' to be a string.`
               });
             }
-            if (((
+            if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
               throw new InvalidResponseDataError({
                 data: toolCallDelta,
                 message: `Expected 'function.name' to be a string.`
@@ -725,12 +716,12 @@ var OpenAIChatLanguageModel = class {
               type: "function",
               function: {
                 name: toolCallDelta.function.name,
-                arguments: (
+                arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
               },
               hasFinished: false
             };
             const toolCall2 = toolCalls[index];
-            if (((
+            if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
               if (toolCall2.function.arguments.length > 0) {
                 controller.enqueue({
                   type: "tool-call-delta",
@@ -744,7 +735,7 @@ var OpenAIChatLanguageModel = class {
               controller.enqueue({
                 type: "tool-call",
                 toolCallType: "function",
-                toolCallId: (
+                toolCallId: (_q = toolCall2.id) != null ? _q : generateId(),
                 toolName: toolCall2.function.name,
                 args: toolCall2.function.arguments
               });
@@ -757,21 +748,21 @@ var OpenAIChatLanguageModel = class {
           if (toolCall.hasFinished) {
             continue;
           }
-          if (((
-            toolCall.function.arguments += (
+          if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
+            toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
           }
           controller.enqueue({
             type: "tool-call-delta",
             toolCallType: "function",
             toolCallId: toolCall.id,
             toolName: toolCall.function.name,
-            argsTextDelta: (
+            argsTextDelta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
           });
-          if (((
+          if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && isParsableJson(toolCall.function.arguments)) {
             controller.enqueue({
               type: "tool-call",
               toolCallType: "function",
-              toolCallId: (
+              toolCallId: (_x = toolCall.id) != null ? _x : generateId(),
               toolName: toolCall.function.name,
               args: toolCall.function.arguments
             });
@@ -798,6 +789,7 @@ var OpenAIChatLanguageModel = class {
 var openaiTokenUsageSchema = z3.object({
   prompt_tokens: z3.number().nullish(),
   completion_tokens: z3.number().nullish(),
+  total_tokens: z3.number().nullish(),
   prompt_tokens_details: z3.object({
     cached_tokens: z3.number().nullish()
   }).nullish(),
@@ -869,6 +861,20 @@ var openaiChatChunkSchema = z3.union([
           })
         ).nullish()
       }).nullish(),
+      logprobs: z3.object({
+        content: z3.array(
+          z3.object({
+            token: z3.string(),
+            logprob: z3.number(),
+            top_logprobs: z3.array(
+              z3.object({
+                token: z3.string(),
+                logprob: z3.number()
+              })
+            )
+          })
+        ).nullish()
+      }).nullish(),
       finish_reason: z3.string().nullish(),
       index: z3.number()
     })
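This addition teaches the streamed-chunk schema to accept per-choice logprobs, which is what allows the stream transform above to copy them into `providerMetadata.openai.logprobs`. A standalone mirror of the added shape, written against plain zod:

import { z } from 'zod';

// Mirror of the logprobs shape added to openaiChatChunkSchema (sketch).
const logprobsSchema = z.object({
  content: z.array(
    z.object({
      token: z.string(),
      logprob: z.number(),
      top_logprobs: z.array(
        z.object({ token: z.string(), logprob: z.number() })
      )
    })
  ).nullish()
}).nullish();

// A streamed choice carrying logprobs now validates instead of being rejected:
console.log(logprobsSchema.parse({
  content: [
    { token: 'Hi', logprob: -0.02, top_logprobs: [{ token: 'Hi', logprob: -0.02 }] }
  ]
}));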
@@ -1139,6 +1145,7 @@ var OpenAICompletionLanguageModel = class {
     };
   }
   async doGenerate(options) {
+    var _a, _b, _c;
     const { args, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -1166,8 +1173,9 @@ var OpenAICompletionLanguageModel = class {
     return {
       content: [{ type: "text", text: choice.text }],
       usage: {
-        inputTokens: response.usage.prompt_tokens,
-        outputTokens: response.usage.completion_tokens
+        inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
+        outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
+        totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
       },
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       request: { body: args },
@@ -1206,7 +1214,8 @@ var OpenAICompletionLanguageModel = class {
     const providerMetadata = { openai: {} };
     const usage = {
       inputTokens: void 0,
-      outputTokens: void 0
+      outputTokens: void 0,
+      totalTokens: void 0
     };
     let isFirstChunk = true;
     return {
@@ -1237,6 +1246,7 @@ var OpenAICompletionLanguageModel = class {
           if (value.usage != null) {
             usage.inputTokens = value.usage.prompt_tokens;
             usage.outputTokens = value.usage.completion_tokens;
+            usage.totalTokens = value.usage.total_tokens;
           }
           const choice = value.choices[0];
           if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1267,6 +1277,11 @@ var OpenAICompletionLanguageModel = class {
       };
     }
 };
+var usageSchema = z5.object({
+  prompt_tokens: z5.number(),
+  completion_tokens: z5.number(),
+  total_tokens: z5.number()
+});
 var openaiCompletionResponseSchema = z5.object({
   id: z5.string().nullish(),
   created: z5.number().nullish(),
@@ -1282,10 +1297,7 @@ var openaiCompletionResponseSchema = z5.object({
       }).nullish()
     })
   ),
-  usage: z5.object({
-    prompt_tokens: z5.number(),
-    completion_tokens: z5.number()
-  })
+  usage: usageSchema.nullish()
 });
 var openaiCompletionChunkSchema = z5.union([
   z5.object({
@@ -1304,10 +1316,7 @@ var openaiCompletionChunkSchema = z5.union([
       }).nullish()
     })
   ),
-  usage: z5.object({
-    prompt_tokens: z5.number(),
-    completion_tokens: z5.number()
-  }).nullish()
+  usage: usageSchema.nullish()
 }),
 openaiErrorDataSchema
]);
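The completion response and chunk schemas previously repeated the usage object inline; both now reuse the single `usageSchema`, and the response side becomes nullish as well, matching the new null-guarded `response.usage` reads in `doGenerate`. The behavioral difference, sketched with plain zod:

import { z } from 'zod';

// One shared definition instead of two inline copies (sketch).
const usageSchema = z.object({
  prompt_tokens: z.number(),
  completion_tokens: z.number(),
  total_tokens: z.number()
});

const responseSchema = z.object({ usage: usageSchema.nullish() });

responseSchema.parse({ usage: { prompt_tokens: 1, completion_tokens: 2, total_tokens: 3 } }); // ok
responseSchema.parse({});              // ok: a response without a usage block no longer fails parsing
responseSchema.parse({ usage: null }); // ok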
@@ -1545,25 +1554,25 @@ var openAITranscriptionProviderOptions = z10.object({
   /**
    * Additional information to include in the transcription response.
    */
-  include: z10.array(z10.string()).
+  include: z10.array(z10.string()).optional(),
   /**
    * The language of the input audio in ISO-639-1 format.
    */
-  language: z10.string().
+  language: z10.string().optional(),
   /**
    * An optional text to guide the model's style or continue a previous audio segment.
    */
-  prompt: z10.string().
+  prompt: z10.string().optional(),
   /**
    * The sampling temperature, between 0 and 1.
    * @default 0
    */
-  temperature: z10.number().min(0).max(1).default(0).
+  temperature: z10.number().min(0).max(1).default(0).optional(),
   /**
    * The timestamp granularities to populate for this transcription.
    * @default ['segment']
    */
-  timestampGranularities: z10.array(z10.enum(["word", "segment"])).default(["segment"]).
+  timestampGranularities: z10.array(z10.enum(["word", "segment"])).default(["segment"]).optional()
 });

 // src/openai-transcription-model.ts
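Every transcription provider option is now explicitly `.optional()`, so callers may omit any or all of them. A usage sketch, assuming the AI SDK's `experimental_transcribe` helper and a local audio file:

import { openai } from '@ai-sdk/openai';
import { experimental_transcribe as transcribe } from 'ai';
import { readFile } from 'node:fs/promises';

const { text } = await transcribe({
  model: openai.transcription('whisper-1'),
  audio: await readFile('audio.mp3'),
  // Each option is optional; omitting providerOptions entirely is valid too.
  providerOptions: {
    openai: {
      language: 'en',                    // ISO-639-1
      temperature: 0,                    // 0..1
      timestampGranularities: ['word']
    }
  }
});

console.log(text);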
@@ -2145,7 +2154,7 @@ var OpenAIResponsesLanguageModel = class {
         ])
       ),
       incomplete_details: z12.object({ reason: z12.string() }).nullable(),
-      usage:
+      usage: usageSchema2
     })
   ),
   abortSignal: options.abortSignal,
@@ -2199,7 +2208,10 @@ var OpenAIResponsesLanguageModel = class {
       }),
       usage: {
         inputTokens: response.usage.input_tokens,
-        outputTokens: response.usage.output_tokens
+        outputTokens: response.usage.output_tokens,
+        totalTokens: response.usage.input_tokens + response.usage.output_tokens,
+        reasoningTokens: (_f = (_e = response.usage.output_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : void 0,
+        cachedInputTokens: (_h = (_g = response.usage.input_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : void 0
       },
       request: { body },
       response: {
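For the Responses API, `totalTokens` is derived client-side as `input_tokens + output_tokens` rather than read from the payload, and the reasoning/cached counters likewise land on `usage`. A consumer sketch under the same v5 canary assumptions as above:

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { usage, providerMetadata } = await generateText({
  model: openai.responses('gpt-4o'),
  prompt: 'Hello!',
});

// totalTokens === inputTokens + outputTokens for this model type
console.log(usage.inputTokens, usage.outputTokens, usage.totalTokens);

// providerMetadata.openai now carries only the response id (next hunk):
console.log(providerMetadata?.openai?.responseId);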
@@ -2211,9 +2223,7 @@ var OpenAIResponsesLanguageModel = class {
       },
       providerMetadata: {
         openai: {
-          responseId: response.id,
-          cachedPromptTokens: (_f = (_e = response.usage.input_tokens_details) == null ? void 0 : _e.cached_tokens) != null ? _f : null,
-          reasoningTokens: (_h = (_g = response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : null
+          responseId: response.id
         }
       },
       warnings
@@ -2242,10 +2252,9 @@ var OpenAIResponsesLanguageModel = class {
     let finishReason = "unknown";
     const usage = {
       inputTokens: void 0,
-      outputTokens: void 0
+      outputTokens: void 0,
+      totalTokens: void 0
     };
-    let cachedPromptTokens = null;
-    let reasoningTokens = null;
     let responseId = null;
     const ongoingToolCalls = {};
     let hasToolCalls = false;
@@ -2323,8 +2332,9 @@ var OpenAIResponsesLanguageModel = class {
           });
           usage.inputTokens = value.response.usage.input_tokens;
           usage.outputTokens = value.response.usage.output_tokens;
-
-          reasoningTokens = (
+          usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
+          usage.reasoningTokens = (_c = (_b = value.response.usage.output_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0;
+          usage.cachedInputTokens = (_e = (_d = value.response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : void 0;
         } else if (isResponseAnnotationAddedChunk(value)) {
           controller.enqueue({
             type: "source",
@@ -2340,13 +2350,9 @@ var OpenAIResponsesLanguageModel = class {
             type: "finish",
             finishReason,
             usage,
-
-
-
-            responseId,
-            cachedPromptTokens,
-            reasoningTokens
-          }
+            providerMetadata: {
+              openai: {
+                responseId
             }
           }
         });
@@ -2358,7 +2364,7 @@ var OpenAIResponsesLanguageModel = class {
       };
     }
 };
-var
+var usageSchema2 = z12.object({
   input_tokens: z12.number(),
   input_tokens_details: z12.object({ cached_tokens: z12.number().nullish() }).nullish(),
   output_tokens: z12.number(),
@@ -2372,7 +2378,7 @@ var responseFinishedChunkSchema = z12.object({
   type: z12.enum(["response.completed", "response.incomplete"]),
   response: z12.object({
     incomplete_details: z12.object({ reason: z12.string() }).nullish(),
-    usage:
+    usage: usageSchema2
   })
 });
 var responseCreatedChunkSchema = z12.object({