@ai-sdk/openai 2.0.0-canary.15 → 2.0.0-canary.16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +13 -0
- package/README.md +2 -2
- package/dist/index.js +79 -73
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +79 -73
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +15 -15
- package/dist/internal/index.d.ts +15 -15
- package/dist/internal/index.js +79 -73
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +79 -73
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +4 -4
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,18 @@
 # @ai-sdk/openai
 
+## 2.0.0-canary.16
+
+### Patch Changes
+
+- 928fadf: fix(providers/openai): logprobs for stream alongside completion model
+- 6f231db: fix(providers): always use optional instead of mix of nullish for providerOptions
+- Updated dependencies [a571d6e]
+- Updated dependencies [a8c8bd5]
+- Updated dependencies [7979f7f]
+- Updated dependencies [41fa418]
+  - @ai-sdk/provider-utils@3.0.0-canary.15
+  - @ai-sdk/provider@2.0.0-canary.14
+
 ## 2.0.0-canary.15
 
 ### Patch Changes
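For context, a minimal sketch of what the `928fadf` fix surfaces (not quoted from the package docs; it assumes the AI SDK v5 canary `streamText` API, that logprobs are requested via `providerOptions.openai.logprobs`, and that `providerMetadata` resolves once the stream finishes):

```ts
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

// Before this patch, logprobs returned on streamed chunks were dropped;
// the index.js diff below now collects them into providerMetadata.openai.logprobs.
const result = streamText({
  model: openai('gpt-4o-mini'),
  prompt: 'Say hello.',
  providerOptions: { openai: { logprobs: 2 } }, // assumption: option name/shape
});

for await (const textPart of result.textStream) {
  process.stdout.write(textPart);
}

// assumption: providerMetadata is a promise that settles after the stream ends
console.log((await result.providerMetadata)?.openai?.logprobs);
```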
package/README.md
CHANGED
@@ -1,6 +1,6 @@
 # AI SDK - OpenAI Provider
 
-The **[OpenAI provider](https://sdk.
+The **[OpenAI provider](https://ai-sdk.dev/providers/ai-sdk-providers/openai)** for the [AI SDK](https://ai-sdk.dev/docs)
 contains language model support for the OpenAI chat and completion APIs and embedding model support for the OpenAI embeddings API.
 
 ## Setup
@@ -33,4 +33,4 @@ const { text } = await generateText({
 
 ## Documentation
 
-Please check out the **[OpenAI provider documentation](https://sdk.
+Please check out the **[OpenAI provider documentation](https://ai-sdk.dev/providers/ai-sdk-providers/openai)** for more information.
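The README's elided quickstart (its `generateText` call is quoted in the second hunk header above) looks roughly like this; a sketch assuming the standard AI SDK surface, with the prompt invented:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Default model factory from the provider; any OpenAI chat model id works here.
const { text } = await generateText({
  model: openai('gpt-4o'),
  prompt: 'Write a vegetarian lasagna recipe for 4 people.',
});
```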
package/dist/index.js
CHANGED
@@ -545,7 +545,7 @@ var OpenAIChatLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
     const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -583,18 +583,12 @@
     const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
     const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
     const providerMetadata = { openai: {} };
-    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
-      providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
-    }
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
       providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
     }
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
       providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
     }
-    if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
-      providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
-    }
     if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
       providerMetadata.openai.logprobs = choice.logprobs.content;
     }
@@ -603,7 +597,10 @@
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       usage: {
         inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
-        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0
+        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
+        totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
+        reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
+        cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
       },
       request: { body },
       response: {
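Consumer-side effect of the two hunks above, sketched (field names are taken from the diff; the `generateText` result shape is assumed from the v5 canary API):

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const { usage, providerMetadata } = await generateText({
  model: openai('gpt-4o'),
  prompt: 'One sentence on token accounting.',
});

// Standardized usage now carries what previously lived in provider metadata:
console.log(usage.inputTokens);       // response.usage.prompt_tokens
console.log(usage.outputTokens);      // response.usage.completion_tokens
console.log(usage.totalTokens);       // new: response.usage.total_tokens
console.log(usage.reasoningTokens);   // was providerMetadata.openai.reasoningTokens
console.log(usage.cachedInputTokens); // was providerMetadata.openai.cachedPromptTokens

// Prediction-token counts remain provider-specific:
console.log(providerMetadata?.openai?.acceptedPredictionTokens);
console.log(providerMetadata?.openai?.rejectedPredictionTokens);
```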
@@ -637,12 +634,12 @@
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { messages: rawPrompt, ...rawSettings } = args;
     const toolCalls = [];
     let finishReason = "unknown";
     const usage = {
       inputTokens: void 0,
-      outputTokens: void 0
+      outputTokens: void 0,
+      totalTokens: void 0
     };
     let isFirstChunk = true;
     const providerMetadata = { openai: {} };
@@ -653,7 +650,7 @@
           controller.enqueue({ type: "stream-start", warnings });
         },
         transform(chunk, controller) {
-          var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+          var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
           if (!chunk.success) {
             finishReason = "error";
             controller.enqueue({ type: "error", error: chunk.error });
@@ -673,31 +670,25 @@
             });
           }
           if (value.usage != null) {
-
-
-
-
-
-
-
-            usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
-            if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
-              providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
-            }
-            if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
-              providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
-            }
-            if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
-              providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
+            usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+            usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
+            usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
+            usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
+            usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
+            if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
+              providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
             }
-            if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
-              providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
+            if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
+              providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
             }
           }
           const choice = value.choices[0];
           if ((choice == null ? void 0 : choice.finish_reason) != null) {
             finishReason = mapOpenAIFinishReason(choice.finish_reason);
           }
+          if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
+            providerMetadata.openai.logprobs = choice.logprobs.content;
+          }
          if ((choice == null ? void 0 : choice.delta) == null) {
            return;
          }
@@ -724,7 +715,7 @@
                 message: `Expected 'id' to be a string.`
               });
             }
-            if (((
+            if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
               throw new import_provider3.InvalidResponseDataError({
                 data: toolCallDelta,
                 message: `Expected 'function.name' to be a string.`
@@ -735,12 +726,12 @@
               type: "function",
               function: {
                 name: toolCallDelta.function.name,
-                arguments: (
+                arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
               },
               hasFinished: false
             };
             const toolCall2 = toolCalls[index];
-            if (((
+            if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
               if (toolCall2.function.arguments.length > 0) {
                 controller.enqueue({
                   type: "tool-call-delta",
@@ -754,7 +745,7 @@
                 controller.enqueue({
                   type: "tool-call",
                   toolCallType: "function",
-                  toolCallId: (
+                  toolCallId: (_q = toolCall2.id) != null ? _q : (0, import_provider_utils3.generateId)(),
                   toolName: toolCall2.function.name,
                   args: toolCall2.function.arguments
                 });
@@ -767,21 +758,21 @@
             if (toolCall.hasFinished) {
               continue;
             }
-            if (((
-              toolCall.function.arguments += (
+            if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
+              toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
             }
             controller.enqueue({
               type: "tool-call-delta",
               toolCallType: "function",
               toolCallId: toolCall.id,
               toolName: toolCall.function.name,
-              argsTextDelta: (
+              argsTextDelta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
             });
-            if (((
+            if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
               controller.enqueue({
                 type: "tool-call",
                 toolCallType: "function",
-                toolCallId: (
+                toolCallId: (_x = toolCall.id) != null ? _x : (0, import_provider_utils3.generateId)(),
                 toolName: toolCall.function.name,
                 args: toolCall.function.arguments
               });
@@ -808,6 +799,7 @@
 var openaiTokenUsageSchema = import_zod3.z.object({
   prompt_tokens: import_zod3.z.number().nullish(),
   completion_tokens: import_zod3.z.number().nullish(),
+  total_tokens: import_zod3.z.number().nullish(),
   prompt_tokens_details: import_zod3.z.object({
     cached_tokens: import_zod3.z.number().nullish()
   }).nullish(),
@@ -879,6 +871,20 @@ var openaiChatChunkSchema = import_zod3.z.union([
         })
       ).nullish()
     }).nullish(),
+    logprobs: import_zod3.z.object({
+      content: import_zod3.z.array(
+        import_zod3.z.object({
+          token: import_zod3.z.string(),
+          logprob: import_zod3.z.number(),
+          top_logprobs: import_zod3.z.array(
+            import_zod3.z.object({
+              token: import_zod3.z.string(),
+              logprob: import_zod3.z.number()
+            })
+          )
+        })
+      ).nullish()
+    }).nullish(),
     finish_reason: import_zod3.z.string().nullish(),
     index: import_zod3.z.number()
   })
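A hand-written sample of the chunk fragment the added `logprobs` schema accepts (values invented, shape mirrors the zod definition above):

```ts
// One streamed choice's logprobs payload as validated by the new schema.
const sampleChoiceLogprobs = {
  logprobs: {
    content: [
      {
        token: 'Hello',
        logprob: -0.01,
        top_logprobs: [
          { token: 'Hello', logprob: -0.01 },
          { token: 'Hi', logprob: -4.6 },
        ],
      },
    ],
  },
};
```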
@@ -1140,6 +1146,7 @@ var OpenAICompletionLanguageModel = class {
     };
   }
   async doGenerate(options) {
+    var _a, _b, _c;
     const { args, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -1167,8 +1174,9 @@
     return {
       content: [{ type: "text", text: choice.text }],
       usage: {
-        inputTokens: response.usage.prompt_tokens,
-        outputTokens: response.usage.completion_tokens
+        inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
+        outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
+        totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
       },
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       request: { body: args },
@@ -1207,7 +1215,8 @@
     const providerMetadata = { openai: {} };
     const usage = {
       inputTokens: void 0,
-      outputTokens: void 0
+      outputTokens: void 0,
+      totalTokens: void 0
     };
     let isFirstChunk = true;
     return {
@@ -1238,6 +1247,7 @@
           if (value.usage != null) {
             usage.inputTokens = value.usage.prompt_tokens;
             usage.outputTokens = value.usage.completion_tokens;
+            usage.totalTokens = value.usage.total_tokens;
           }
           const choice = value.choices[0];
           if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1268,6 +1278,11 @@
       };
     }
   };
+var usageSchema = import_zod5.z.object({
+  prompt_tokens: import_zod5.z.number(),
+  completion_tokens: import_zod5.z.number(),
+  total_tokens: import_zod5.z.number()
+});
 var openaiCompletionResponseSchema = import_zod5.z.object({
   id: import_zod5.z.string().nullish(),
   created: import_zod5.z.number().nullish(),
@@ -1283,10 +1298,7 @@ var openaiCompletionResponseSchema = import_zod5.z.object({
       }).nullish()
     })
   ),
-  usage: import_zod5.z.object({
-    prompt_tokens: import_zod5.z.number(),
-    completion_tokens: import_zod5.z.number()
-  })
+  usage: usageSchema.nullish()
 });
 var openaiCompletionChunkSchema = import_zod5.z.union([
   import_zod5.z.object({
|
var openaiCompletionChunkSchema = import_zod5.z.union([
|
|
1292
1304
|
import_zod5.z.object({
|
|
@@ -1305,10 +1317,7 @@ var openaiCompletionChunkSchema = import_zod5.z.union([
|
|
|
1305
1317
|
}).nullish()
|
|
1306
1318
|
})
|
|
1307
1319
|
),
|
|
1308
|
-
usage:
|
|
1309
|
-
prompt_tokens: import_zod5.z.number(),
|
|
1310
|
-
completion_tokens: import_zod5.z.number()
|
|
1311
|
-
}).nullish()
|
|
1320
|
+
usage: usageSchema.nullish()
|
|
1312
1321
|
}),
|
|
1313
1322
|
openaiErrorDataSchema
|
|
1314
1323
|
]);
|
|
@@ -1529,25 +1538,25 @@ var openAITranscriptionProviderOptions = import_zod10.z.object({
   /**
    * Additional information to include in the transcription response.
    */
-  include: import_zod10.z.array(import_zod10.z.string()).nullish(),
+  include: import_zod10.z.array(import_zod10.z.string()).optional(),
   /**
    * The language of the input audio in ISO-639-1 format.
    */
-  language: import_zod10.z.string().nullish(),
+  language: import_zod10.z.string().optional(),
   /**
    * An optional text to guide the model's style or continue a previous audio segment.
    */
-  prompt: import_zod10.z.string().nullish(),
+  prompt: import_zod10.z.string().optional(),
   /**
    * The sampling temperature, between 0 and 1.
    * @default 0
    */
-  temperature: import_zod10.z.number().min(0).max(1).default(0).nullish(),
+  temperature: import_zod10.z.number().min(0).max(1).default(0).optional(),
   /**
    * The timestamp granularities to populate for this transcription.
    * @default ['segment']
    */
-  timestampGranularities: import_zod10.z.array(import_zod10.z.enum(["word", "segment"])).default(["segment"]).nullish()
+  timestampGranularities: import_zod10.z.array(import_zod10.z.enum(["word", "segment"])).default(["segment"]).optional()
 });
 
 // src/openai-transcription-model.ts
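With these options now `optional()` rather than `nullish()` (the `6f231db` changelog entry), callers simply omit unused keys; passing `null` no longer validates. A sketch, assuming the canary `experimental_transcribe` API:

```ts
import { readFile } from 'node:fs/promises';
import { openai } from '@ai-sdk/openai';
import { experimental_transcribe as transcribe } from 'ai';

const { text } = await transcribe({
  model: openai.transcription('whisper-1'),
  audio: await readFile('audio.mp3'),
  providerOptions: {
    openai: {
      language: 'en',                   // ISO-639-1; omit rather than pass null
      timestampGranularities: ['word'], // defaults to ['segment']
    },
  },
});
```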
@@ -2118,7 +2127,7 @@ var OpenAIResponsesLanguageModel = class {
           ])
         ),
         incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullable(),
-        usage: usageSchema
+        usage: usageSchema2
       })
     ),
     abortSignal: options.abortSignal,
@@ -2172,7 +2181,10 @@
       }),
       usage: {
         inputTokens: response.usage.input_tokens,
-        outputTokens: response.usage.output_tokens
+        outputTokens: response.usage.output_tokens,
+        totalTokens: response.usage.input_tokens + response.usage.output_tokens,
+        reasoningTokens: (_f = (_e = response.usage.output_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : void 0,
+        cachedInputTokens: (_h = (_g = response.usage.input_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : void 0
       },
       request: { body },
       response: {
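Note that this Responses-API path derives `totalTokens` from the two counts it already reads instead of taking a total from the payload. Condensed, the mapping above is (a restatement of the compiled code with optional chaining in place of the transpiled null checks, not a new API):

```ts
// Mirrors the hunk above; the parameter type matches usageSchema2 below.
function mapResponsesUsage(u: {
  input_tokens: number;
  output_tokens: number;
  input_tokens_details?: { cached_tokens?: number | null } | null;
  output_tokens_details?: { reasoning_tokens?: number | null } | null;
}) {
  return {
    inputTokens: u.input_tokens,
    outputTokens: u.output_tokens,
    totalTokens: u.input_tokens + u.output_tokens, // derived, not read from the response
    reasoningTokens: u.output_tokens_details?.reasoning_tokens ?? undefined,
    cachedInputTokens: u.input_tokens_details?.cached_tokens ?? undefined,
  };
}
```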
@@ -2184,9 +2196,7 @@
       },
       providerMetadata: {
         openai: {
-          responseId: response.id,
-          cachedPromptTokens: (_f = (_e = response.usage.input_tokens_details) == null ? void 0 : _e.cached_tokens) != null ? _f : null,
-          reasoningTokens: (_h = (_g = response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : null
+          responseId: response.id
         }
       },
       warnings
@@ -2215,10 +2225,9 @@
     let finishReason = "unknown";
     const usage = {
       inputTokens: void 0,
-      outputTokens: void 0
+      outputTokens: void 0,
+      totalTokens: void 0
     };
-    let cachedPromptTokens = null;
-    let reasoningTokens = null;
     let responseId = null;
     const ongoingToolCalls = {};
     let hasToolCalls = false;
@@ -2296,8 +2305,9 @@
             });
             usage.inputTokens = value.response.usage.input_tokens;
             usage.outputTokens = value.response.usage.output_tokens;
-
-            reasoningTokens = (
+            usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
+            usage.reasoningTokens = (_c = (_b = value.response.usage.output_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0;
+            usage.cachedInputTokens = (_e = (_d = value.response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : void 0;
           } else if (isResponseAnnotationAddedChunk(value)) {
             controller.enqueue({
               type: "source",
@@ -2313,13 +2323,9 @@
               type: "finish",
               finishReason,
               usage,
-
-
-
-              responseId,
-              cachedPromptTokens,
-              reasoningTokens
-            }
+              providerMetadata: {
+                openai: {
+                  responseId
               }
             }
           });
@@ -2331,7 +2337,7 @@
       };
     }
   };
-var usageSchema = import_zod12.z.object({
+var usageSchema2 = import_zod12.z.object({
   input_tokens: import_zod12.z.number(),
   input_tokens_details: import_zod12.z.object({ cached_tokens: import_zod12.z.number().nullish() }).nullish(),
   output_tokens: import_zod12.z.number(),
@@ -2345,7 +2351,7 @@ var responseFinishedChunkSchema = import_zod12.z.object({
   type: import_zod12.z.enum(["response.completed", "response.incomplete"]),
   response: import_zod12.z.object({
     incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullish(),
-    usage: usageSchema
+    usage: usageSchema2
   })
 });
 var responseCreatedChunkSchema = import_zod12.z.object({