@ai-sdk/openai 2.0.0-canary.15 → 2.0.0-canary.17
This diff compares the contents of two publicly released versions of the package, as published to the public npm registry, and is provided for informational purposes only.
- package/CHANGELOG.md +21 -0
- package/README.md +2 -2
- package/dist/index.d.mts +1 -8
- package/dist/index.d.ts +1 -8
- package/dist/index.js +88 -86
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +88 -86
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +15 -17
- package/dist/internal/index.d.ts +15 -17
- package/dist/internal/index.js +85 -77
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +85 -77
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +4 -4
package/dist/index.mjs CHANGED
@@ -535,7 +535,7 @@ var OpenAIChatLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
     const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -573,18 +573,12 @@ var OpenAIChatLanguageModel = class {
     const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
     const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
     const providerMetadata = { openai: {} };
-    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
-      providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
-    }
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
       providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
     }
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
       providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
     }
-    if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
-      providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
-    }
     if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
       providerMetadata.openai.logprobs = choice.logprobs.content;
     }
@@ -593,7 +587,10 @@ var OpenAIChatLanguageModel = class {
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       usage: {
         inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
-        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0
+        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
+        totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
+        reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
+        cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
       },
       request: { body },
       response: {
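
Note: reasoningTokens and cachedPromptTokens move out of providerMetadata.openai (previous hunk) and into the standardized usage object, which also gains totalTokens. A consumer-side sketch, assuming the matching ai canary release and a placeholder model id:

import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const { usage } = await generateText({
  model: openai('gpt-4o'), // placeholder model id
  prompt: 'Write a haiku about token accounting.',
});

// previously providerMetadata.openai.reasoningTokens / .cachedPromptTokens;
// now standardized fields on usage:
console.log(usage.totalTokens, usage.reasoningTokens, usage.cachedInputTokens);
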
@@ -610,8 +607,9 @@ var OpenAIChatLanguageModel = class {
     const body = {
       ...args,
       stream: true,
-      …
-      …
+      stream_options: {
+        include_usage: true
+      }
     };
     const { responseHeaders, value: response } = await postJsonToApi({
       url: this.config.url({
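
Note: streaming requests now always send stream_options.include_usage; earlier canaries appear to have gated this behind the compatibility option that this release removes (see the createOpenAI hunks below). With include_usage set, the OpenAI API emits one final chunk whose choices array is empty and which carries the usage totals; a shape sketch with illustrative values, not code from this package:

const finalUsageChunk = {
  object: 'chat.completion.chunk',
  choices: [], // empty on the usage-only chunk
  usage: {
    prompt_tokens: 11,
    completion_tokens: 42,
    total_tokens: 53,
  },
};
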
@@ -627,12 +625,12 @@ var OpenAIChatLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { messages: rawPrompt, ...rawSettings } = args;
     const toolCalls = [];
     let finishReason = "unknown";
     const usage = {
       inputTokens: void 0,
-      outputTokens: void 0
+      outputTokens: void 0,
+      totalTokens: void 0
     };
     let isFirstChunk = true;
     const providerMetadata = { openai: {} };
@@ -643,7 +641,7 @@ var OpenAIChatLanguageModel = class {
            controller.enqueue({ type: "stream-start", warnings });
          },
          transform(chunk, controller) {
-            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
            if (!chunk.success) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: chunk.error });
@@ -663,31 +661,25 @@ var OpenAIChatLanguageModel = class {
              });
            }
            if (value.usage != null) {
-              …
-              …
-              …
-              …
-              …
-              …
-              …
-              usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
-              if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
-                providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
-              }
-              if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
-                providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
-              }
-              if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
-                providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
+              usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+              usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
+              usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
+              usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
+              usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
+              if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
+                providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
              }
-              if ((…
-                providerMetadata.openai.…
+              if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
+                providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
              }
            }
            const choice = value.choices[0];
            if ((choice == null ? void 0 : choice.finish_reason) != null) {
              finishReason = mapOpenAIFinishReason(choice.finish_reason);
            }
+            if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
+              providerMetadata.openai.logprobs = choice.logprobs.content;
+            }
            if ((choice == null ? void 0 : choice.delta) == null) {
              return;
            }
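
Note: the stream transform now fills the same standardized usage fields as doGenerate and also surfaces streamed logprobs in providerMetadata.openai. A consumption sketch, assuming the matching ai canary's streamText:

import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

const result = streamText({
  model: openai('gpt-4o'), // placeholder model id
  prompt: 'Stream a limerick.',
  onFinish({ usage }) {
    // populated from the final include_usage chunk parsed above
    console.log(usage.totalTokens, usage.reasoningTokens, usage.cachedInputTokens);
  },
});

for await (const text of result.textStream) {
  process.stdout.write(text);
}
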
@@ -714,7 +706,7 @@ var OpenAIChatLanguageModel = class {
                  message: `Expected 'id' to be a string.`
                });
              }
-              if (((…
+              if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
                throw new InvalidResponseDataError({
                  data: toolCallDelta,
                  message: `Expected 'function.name' to be a string.`
@@ -725,12 +717,12 @@ var OpenAIChatLanguageModel = class {
                type: "function",
                function: {
                  name: toolCallDelta.function.name,
-                  arguments: (…
+                  arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
                },
                hasFinished: false
              };
              const toolCall2 = toolCalls[index];
-              if (((…
+              if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
                if (toolCall2.function.arguments.length > 0) {
                  controller.enqueue({
                    type: "tool-call-delta",
@@ -744,7 +736,7 @@ var OpenAIChatLanguageModel = class {
                  controller.enqueue({
                    type: "tool-call",
                    toolCallType: "function",
-                    toolCallId: (…
+                    toolCallId: (_q = toolCall2.id) != null ? _q : generateId(),
                    toolName: toolCall2.function.name,
                    args: toolCall2.function.arguments
                  });
@@ -757,21 +749,21 @@ var OpenAIChatLanguageModel = class {
              if (toolCall.hasFinished) {
                continue;
              }
-              if (((…
-                toolCall.function.arguments += (…
+              if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
+                toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
              }
              controller.enqueue({
                type: "tool-call-delta",
                toolCallType: "function",
                toolCallId: toolCall.id,
                toolName: toolCall.function.name,
-                argsTextDelta: (…
+                argsTextDelta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
              });
-              if (((…
+              if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && isParsableJson(toolCall.function.arguments)) {
                controller.enqueue({
                  type: "tool-call",
                  toolCallType: "function",
-                  toolCallId: (…
+                  toolCallId: (_x = toolCall.id) != null ? _x : generateId(),
                  toolName: toolCall.function.name,
                  args: toolCall.function.arguments
                });
@@ -798,6 +790,7 @@ var OpenAIChatLanguageModel = class {
 var openaiTokenUsageSchema = z3.object({
   prompt_tokens: z3.number().nullish(),
   completion_tokens: z3.number().nullish(),
+  total_tokens: z3.number().nullish(),
   prompt_tokens_details: z3.object({
     cached_tokens: z3.number().nullish()
   }).nullish(),
@@ -869,6 +862,20 @@ var openaiChatChunkSchema = z3.union([
         })
       ).nullish()
     }).nullish(),
+    logprobs: z3.object({
+      content: z3.array(
+        z3.object({
+          token: z3.string(),
+          logprob: z3.number(),
+          top_logprobs: z3.array(
+            z3.object({
+              token: z3.string(),
+              logprob: z3.number()
+            })
+          )
+        })
+      ).nullish()
+    }).nullish(),
     finish_reason: z3.string().nullish(),
     index: z3.number()
   })
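
Note: chunk-level logprobs were previously not parsed, so the transform above could never copy them into providerMetadata.openai.logprobs for streamed responses; this schema addition enables that. A reading sketch; the logprobs provider option name is an assumption carried over from earlier releases:

import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

const result = streamText({
  model: openai('gpt-4o'), // placeholder model id
  prompt: 'One word: yes or no?',
  providerOptions: { openai: { logprobs: 5 } }, // assumed option name
});

await result.consumeStream();
console.log((await result.providerMetadata)?.openai?.logprobs);
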
@@ -1139,6 +1146,7 @@ var OpenAICompletionLanguageModel = class {
     };
   }
   async doGenerate(options) {
+    var _a, _b, _c;
     const { args, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -1166,8 +1174,9 @@ var OpenAICompletionLanguageModel = class {
     return {
       content: [{ type: "text", text: choice.text }],
       usage: {
-        inputTokens: response.usage.prompt_tokens,
-        outputTokens: response.usage.completion_tokens
+        inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
+        outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
+        totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
       },
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       request: { body: args },
@@ -1185,8 +1194,9 @@ var OpenAICompletionLanguageModel = class {
     const body = {
       ...args,
       stream: true,
-      …
-      …
+      stream_options: {
+        include_usage: true
+      }
     };
     const { responseHeaders, value: response } = await postJsonToApi2({
       url: this.config.url({
@@ -1206,7 +1216,8 @@ var OpenAICompletionLanguageModel = class {
     const providerMetadata = { openai: {} };
     const usage = {
       inputTokens: void 0,
-      outputTokens: void 0
+      outputTokens: void 0,
+      totalTokens: void 0
     };
     let isFirstChunk = true;
     return {
@@ -1237,6 +1248,7 @@ var OpenAICompletionLanguageModel = class {
            if (value.usage != null) {
              usage.inputTokens = value.usage.prompt_tokens;
              usage.outputTokens = value.usage.completion_tokens;
+              usage.totalTokens = value.usage.total_tokens;
            }
            const choice = value.choices[0];
            if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1267,6 +1279,11 @@ var OpenAICompletionLanguageModel = class {
     };
   }
 };
+var usageSchema = z5.object({
+  prompt_tokens: z5.number(),
+  completion_tokens: z5.number(),
+  total_tokens: z5.number()
+});
 var openaiCompletionResponseSchema = z5.object({
   id: z5.string().nullish(),
   created: z5.number().nullish(),
@@ -1282,10 +1299,7 @@ var openaiCompletionResponseSchema = z5.object({
       }).nullish()
     })
   ),
-  usage: z5.object({
-    prompt_tokens: z5.number(),
-    completion_tokens: z5.number()
-  })
+  usage: usageSchema.nullish()
 });
 var openaiCompletionChunkSchema = z5.union([
   z5.object({
@@ -1304,10 +1318,7 @@ var openaiCompletionChunkSchema = z5.union([
       }).nullish()
     })
   ),
-  usage: z5.object({
-    prompt_tokens: z5.number(),
-    completion_tokens: z5.number()
-  }).nullish()
+  usage: usageSchema.nullish()
   }),
   openaiErrorDataSchema
 ]);
@@ -1545,25 +1556,25 @@ var openAITranscriptionProviderOptions = z10.object({
   /**
    * Additional information to include in the transcription response.
    */
-  include: z10.array(z10.string()).…
+  include: z10.array(z10.string()).optional(),
   /**
    * The language of the input audio in ISO-639-1 format.
    */
-  language: z10.string().…
+  language: z10.string().optional(),
   /**
    * An optional text to guide the model's style or continue a previous audio segment.
    */
-  prompt: z10.string().…
+  prompt: z10.string().optional(),
   /**
    * The sampling temperature, between 0 and 1.
    * @default 0
    */
-  temperature: z10.number().min(0).max(1).default(0).…
+  temperature: z10.number().min(0).max(1).default(0).optional(),
   /**
    * The timestamp granularities to populate for this transcription.
    * @default ['segment']
    */
-  timestampGranularities: z10.array(z10.enum(["word", "segment"])).default(["segment"]).…
+  timestampGranularities: z10.array(z10.enum(["word", "segment"])).default(["segment"]).optional()
 });
 
 // src/openai-transcription-model.ts
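
Note: the transcription provider options now end in .optional(). They are passed under providerOptions.openai; a sketch, assuming the canary ai package's experimental transcribe API:

import { openai } from '@ai-sdk/openai';
import { experimental_transcribe as transcribe } from 'ai';
import { readFile } from 'node:fs/promises';

const { text } = await transcribe({
  model: openai.transcription('whisper-1'), // placeholder model id
  audio: await readFile('audio.mp3'), // hypothetical input file
  providerOptions: {
    openai: {
      language: 'en',
      temperature: 0,
      timestampGranularities: ['word'],
    },
  },
});
console.log(text);
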
@@ -2145,7 +2156,7 @@ var OpenAIResponsesLanguageModel = class {
           ])
         ),
         incomplete_details: z12.object({ reason: z12.string() }).nullable(),
-        usage: …
+        usage: usageSchema2
       })
     ),
     abortSignal: options.abortSignal,
@@ -2199,7 +2210,10 @@ var OpenAIResponsesLanguageModel = class {
       }),
       usage: {
         inputTokens: response.usage.input_tokens,
-        outputTokens: response.usage.output_tokens
+        outputTokens: response.usage.output_tokens,
+        totalTokens: response.usage.input_tokens + response.usage.output_tokens,
+        reasoningTokens: (_f = (_e = response.usage.output_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : void 0,
+        cachedInputTokens: (_h = (_g = response.usage.input_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : void 0
       },
       request: { body },
       response: {
@@ -2211,9 +2225,7 @@ var OpenAIResponsesLanguageModel = class {
       },
       providerMetadata: {
         openai: {
-          responseId: response.id,
-          cachedPromptTokens: (_f = (_e = response.usage.input_tokens_details) == null ? void 0 : _e.cached_tokens) != null ? _f : null,
-          reasoningTokens: (_h = (_g = response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : null
+          responseId: response.id
         }
       },
       warnings
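
Note: for the Responses API model, cachedPromptTokens and reasoningTokens leave providerMetadata.openai; they are now reported through the standard usage object (previous hunk), with totalTokens computed as input_tokens + output_tokens. Only responseId remains provider-specific. A reading sketch, assuming the matching ai canary:

import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const { usage, providerMetadata } = await generateText({
  model: openai.responses('gpt-4o'), // placeholder model id
  prompt: 'Summarize this changelog entry.',
});

console.log(usage.reasoningTokens, usage.cachedInputTokens); // standardized
console.log(providerMetadata?.openai?.responseId); // still provider-specific
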
@@ -2242,10 +2254,9 @@ var OpenAIResponsesLanguageModel = class {
     let finishReason = "unknown";
     const usage = {
       inputTokens: void 0,
-      outputTokens: void 0
+      outputTokens: void 0,
+      totalTokens: void 0
     };
-    let cachedPromptTokens = null;
-    let reasoningTokens = null;
     let responseId = null;
     const ongoingToolCalls = {};
     let hasToolCalls = false;
@@ -2323,8 +2334,9 @@ var OpenAIResponsesLanguageModel = class {
           });
           usage.inputTokens = value.response.usage.input_tokens;
           usage.outputTokens = value.response.usage.output_tokens;
-          …
-          reasoningTokens = (…
+          usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
+          usage.reasoningTokens = (_c = (_b = value.response.usage.output_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0;
+          usage.cachedInputTokens = (_e = (_d = value.response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : void 0;
         } else if (isResponseAnnotationAddedChunk(value)) {
           controller.enqueue({
             type: "source",
@@ -2340,13 +2352,9 @@ var OpenAIResponsesLanguageModel = class {
           type: "finish",
           finishReason,
           usage,
-          …
-          …
-          …
-          responseId,
-          cachedPromptTokens,
-          reasoningTokens
-          }
+          providerMetadata: {
+            openai: {
+              responseId
             }
           }
         });
@@ -2358,7 +2366,7 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
 };
-var …
+var usageSchema2 = z12.object({
   input_tokens: z12.number(),
   input_tokens_details: z12.object({ cached_tokens: z12.number().nullish() }).nullish(),
   output_tokens: z12.number(),
@@ -2372,7 +2380,7 @@ var responseFinishedChunkSchema = z12.object({
   type: z12.enum(["response.completed", "response.incomplete"]),
   response: z12.object({
     incomplete_details: z12.object({ reason: z12.string() }).nullish(),
-    usage: …
+    usage: usageSchema2
   })
 });
 var responseCreatedChunkSchema = z12.object({
@@ -2612,10 +2620,9 @@ var OpenAISpeechModel = class {
 
 // src/openai-provider.ts
 function createOpenAI(options = {}) {
-  var _a, _b…
+  var _a, _b;
   const baseURL = (_a = withoutTrailingSlash(options.baseURL)) != null ? _a : "https://api.openai.com/v1";
-  const …
-  const providerName = (_c = options.name) != null ? _c : "openai";
+  const providerName = (_b = options.name) != null ? _b : "openai";
   const getHeaders = () => ({
     Authorization: `Bearer ${loadApiKey({
       apiKey: options.apiKey,
@@ -2630,14 +2637,12 @@ function createOpenAI(options = {}) {
     provider: `${providerName}.chat`,
     url: ({ path }) => `${baseURL}${path}`,
     headers: getHeaders,
-    compatibility,
     fetch: options.fetch
   });
   const createCompletionModel = (modelId) => new OpenAICompletionLanguageModel(modelId, {
     provider: `${providerName}.completion`,
     url: ({ path }) => `${baseURL}${path}`,
     headers: getHeaders,
-    compatibility,
     fetch: options.fetch
   });
   const createEmbeddingModel = (modelId) => new OpenAIEmbeddingModel(modelId, {
@@ -2702,10 +2707,7 @@ function createOpenAI(options = {}) {
   provider.tools = openaiTools;
   return provider;
 }
-var openai = createOpenAI({
-  compatibility: "strict"
-  // strict for OpenAI API
-});
+var openai = createOpenAI();
 export {
   createOpenAI,
   openai
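
Note: the compatibility option is removed end to end: the chat and completion model configs no longer receive it, and the default openai instance is constructed without arguments. Custom instances drop it as well; a sketch (the gateway URL and env var name are hypothetical):

import { createOpenAI } from '@ai-sdk/openai';

// before: createOpenAI({ compatibility: 'strict' })
const myOpenAI = createOpenAI({
  baseURL: 'https://gateway.example.com/v1', // hypothetical endpoint
  apiKey: process.env.MY_OPENAI_API_KEY, // hypothetical env var
  name: 'my-openai',
});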