@ai-sdk/openai 2.0.0-canary.15 → 2.0.0-canary.17
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +21 -0
- package/README.md +2 -2
- package/dist/index.d.mts +1 -8
- package/dist/index.d.ts +1 -8
- package/dist/index.js +88 -86
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +88 -86
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +15 -17
- package/dist/internal/index.d.ts +15 -17
- package/dist/internal/index.js +85 -77
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +85 -77
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +4 -4
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,26 @@
 # @ai-sdk/openai
 
+## 2.0.0-canary.17
+
+### Patch Changes
+
+- 52ce942: chore(providers/openai): remove & enable strict compatibility by default
+- Updated dependencies [87b828f]
+  - @ai-sdk/provider-utils@3.0.0-canary.16
+
+## 2.0.0-canary.16
+
+### Patch Changes
+
+- 928fadf: fix(providers/openai): logprobs for stream alongside completion model
+- 6f231db: fix(providers): always use optional instead of mix of nullish for providerOptions
+- Updated dependencies [a571d6e]
+- Updated dependencies [a8c8bd5]
+- Updated dependencies [7979f7f]
+- Updated dependencies [41fa418]
+  - @ai-sdk/provider-utils@3.0.0-canary.15
+  - @ai-sdk/provider@2.0.0-canary.14
+
 ## 2.0.0-canary.15
 
 ### Patch Changes
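The headline change here is 52ce942: the `compatibility` switch is gone and strict (official OpenAI API) behavior is always on. A minimal before/after sketch of provider setup, assuming nothing beyond what this diff shows:

```ts
import { createOpenAI } from '@ai-sdk/openai';

// Up to canary.16, strict mode was opt-in and 'compatible' was the default:
// const openai = createOpenAI({ compatibility: 'strict' });

// From canary.17 on, the option is removed from OpenAIProviderSettings,
// so passing it becomes a type error; strict behavior (e.g. always sending
// stream_options on streamed requests) is the only mode.
const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });
```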
package/README.md
CHANGED
@@ -1,6 +1,6 @@
 # AI SDK - OpenAI Provider
 
-The **[OpenAI provider](https://sdk.
+The **[OpenAI provider](https://ai-sdk.dev/providers/ai-sdk-providers/openai)** for the [AI SDK](https://ai-sdk.dev/docs)
 contains language model support for the OpenAI chat and completion APIs and embedding model support for the OpenAI embeddings API.
 
 ## Setup
@@ -33,4 +33,4 @@ const { text } = await generateText({
 
 ## Documentation
 
-Please check out the **[OpenAI provider documentation](https://sdk.
+Please check out the **[OpenAI provider documentation](https://ai-sdk.dev/providers/ai-sdk-providers/openai)** for more information.
package/dist/index.d.mts
CHANGED
@@ -8,7 +8,6 @@ type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
 
 type OpenAICompletionConfig = {
     provider: string;
-    compatibility: 'strict' | 'compatible';
    headers: () => Record<string, string | undefined>;
    url: (options: {
        modelId: string;
@@ -143,12 +142,6 @@ interface OpenAIProviderSettings {
    */
    headers?: Record<string, string>;
    /**
-    OpenAI compatibility mode. Should be set to `strict` when using the OpenAI API,
-    and `compatible` when using 3rd party providers. In `compatible` mode, newer
-    information such as streamOptions are not being sent. Defaults to 'compatible'.
-    */
-    compatibility?: 'strict' | 'compatible';
-    /**
    Provider name. Overrides the `openai` default name for 3rd party providers.
    */
    name?: string;
@@ -163,7 +156,7 @@ Create an OpenAI provider instance.
 */
 declare function createOpenAI(options?: OpenAIProviderSettings): OpenAIProvider;
 /**
-Default OpenAI provider instance.
+Default OpenAI provider instance.
 */
 declare const openai: OpenAIProvider;
 
package/dist/index.d.ts
CHANGED
@@ -8,7 +8,6 @@ type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
 
 type OpenAICompletionConfig = {
     provider: string;
-    compatibility: 'strict' | 'compatible';
    headers: () => Record<string, string | undefined>;
    url: (options: {
        modelId: string;
@@ -143,12 +142,6 @@ interface OpenAIProviderSettings {
    */
    headers?: Record<string, string>;
    /**
-    OpenAI compatibility mode. Should be set to `strict` when using the OpenAI API,
-    and `compatible` when using 3rd party providers. In `compatible` mode, newer
-    information such as streamOptions are not being sent. Defaults to 'compatible'.
-    */
-    compatibility?: 'strict' | 'compatible';
-    /**
    Provider name. Overrides the `openai` default name for 3rd party providers.
    */
    name?: string;
@@ -163,7 +156,7 @@ Create an OpenAI provider instance.
 */
 declare function createOpenAI(options?: OpenAIProviderSettings): OpenAIProvider;
 /**
-Default OpenAI provider instance.
+Default OpenAI provider instance.
 */
 declare const openai: OpenAIProvider;
 
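Both declaration files drop the same `compatibility` member and its doc comment. For OpenAI-compatible third-party endpoints, the remaining knobs in `OpenAIProviderSettings` are `baseURL`, `name`, and `headers`. A sketch (the proxy name and URL are hypothetical):

```ts
import { createOpenAI } from '@ai-sdk/openai';

// There is no longer a flag to suppress newer request fields such as
// stream_options; endpoints that reject them are better served by the
// separate @ai-sdk/openai-compatible package.
const proxy = createOpenAI({
  name: 'my-proxy', // hypothetical provider name
  baseURL: 'https://llm.internal.example/v1', // hypothetical endpoint
  apiKey: process.env.PROXY_API_KEY,
});
```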
package/dist/index.js
CHANGED
@@ -545,7 +545,7 @@ var OpenAIChatLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
     const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -583,18 +583,12 @@ var OpenAIChatLanguageModel = class {
     const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
     const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
     const providerMetadata = { openai: {} };
-    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
-      providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
-    }
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
       providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
     }
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
       providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
     }
-    if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
-      providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
-    }
     if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
       providerMetadata.openai.logprobs = choice.logprobs.content;
     }
@@ -603,7 +597,10 @@ var OpenAIChatLanguageModel = class {
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       usage: {
         inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
-        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0
+        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
+        totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
+        reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
+        cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
       },
       request: { body },
       response: {
@@ -620,8 +617,9 @@ var OpenAIChatLanguageModel = class {
     const body = {
       ...args,
       stream: true,
-      // only include stream_options when in strict compatibility mode:
-      ...this.config.compatibility === "strict" ? { stream_options: { include_usage: true } } : {}
+      stream_options: {
+        include_usage: true
+      }
     };
     const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
       url: this.config.url({
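The removed conditional (its exact wording is reconstructed here; the extraction truncated it) only sent `stream_options` in strict mode; now every streamed chat request carries it unconditionally. With `include_usage: true`, OpenAI appends a final chunk containing token counts, which feeds the `usage` accumulator below. A sketch of the request body now sent (model and messages illustrative):

```ts
// Shape of the streaming request body after this change:
const body = {
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'Hi' }],
  stream: true,
  stream_options: { include_usage: true }, // always included now
};
```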
@@ -637,12 +635,12 @@ var OpenAIChatLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { messages: rawPrompt, ...rawSettings } = args;
     const toolCalls = [];
     let finishReason = "unknown";
     const usage = {
       inputTokens: void 0,
-      outputTokens: void 0
+      outputTokens: void 0,
+      totalTokens: void 0
     };
     let isFirstChunk = true;
     const providerMetadata = { openai: {} };
@@ -653,7 +651,7 @@ var OpenAIChatLanguageModel = class {
         controller.enqueue({ type: "stream-start", warnings });
       },
       transform(chunk, controller) {
-        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
         if (!chunk.success) {
           finishReason = "error";
           controller.enqueue({ type: "error", error: chunk.error });
@@ -673,31 +671,25 @@ var OpenAIChatLanguageModel = class {
           });
         }
         if (value.usage != null) {
-          const {
-            prompt_tokens,
-            completion_tokens,
-            prompt_tokens_details,
-            completion_tokens_details
-          } = value.usage;
-          usage.inputTokens = prompt_tokens != null ? prompt_tokens : void 0;
-          usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
-          if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
-            providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
-          }
-          if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
-            providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
-          }
-          if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
-            providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
+          usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+          usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
+          usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
+          usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
+          usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
+          if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
+            providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
           }
-          if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
-            providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
+          if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
+            providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
           }
         }
         const choice = value.choices[0];
         if ((choice == null ? void 0 : choice.finish_reason) != null) {
           finishReason = mapOpenAIFinishReason(choice.finish_reason);
         }
+        if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
+          providerMetadata.openai.logprobs = choice.logprobs.content;
+        }
         if ((choice == null ? void 0 : choice.delta) == null) {
           return;
         }
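The net effect of this hunk: token accounting that canary.15 tucked into `providerMetadata.openai` (`reasoningTokens`, `cachedPromptTokens`) now lands on the standardized `usage` object, with `totalTokens` added, while prediction-token counts stay in provider metadata. A sketch of reading the new fields with `generateText`, assuming only the field names this diff shows:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { usage, providerMetadata } = await generateText({
  model: openai('gpt-4o'),
  prompt: 'Hello!',
});

// Standardized fields per this diff:
console.log(usage.inputTokens, usage.outputTokens, usage.totalTokens);
console.log(usage.reasoningTokens, usage.cachedInputTokens);
// Still provider-specific:
console.log(providerMetadata?.openai?.acceptedPredictionTokens);
```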
@@ -724,7 +716,7 @@ var OpenAIChatLanguageModel = class {
                 message: `Expected 'id' to be a string.`
               });
             }
-            if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
+            if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
               throw new import_provider3.InvalidResponseDataError({
                 data: toolCallDelta,
                 message: `Expected 'function.name' to be a string.`
@@ -735,12 +727,12 @@ var OpenAIChatLanguageModel = class {
               type: "function",
               function: {
                 name: toolCallDelta.function.name,
-                arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
+                arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
               },
               hasFinished: false
             };
             const toolCall2 = toolCalls[index];
-            if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
+            if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
               if (toolCall2.function.arguments.length > 0) {
                 controller.enqueue({
                   type: "tool-call-delta",
@@ -754,7 +746,7 @@ var OpenAIChatLanguageModel = class {
                 controller.enqueue({
                   type: "tool-call",
                   toolCallType: "function",
-                  toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils3.generateId)(),
+                  toolCallId: (_q = toolCall2.id) != null ? _q : (0, import_provider_utils3.generateId)(),
                   toolName: toolCall2.function.name,
                   args: toolCall2.function.arguments
                 });
@@ -767,21 +759,21 @@ var OpenAIChatLanguageModel = class {
             if (toolCall.hasFinished) {
               continue;
             }
-            if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
-              toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
+            if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
+              toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
             }
             controller.enqueue({
               type: "tool-call-delta",
               toolCallType: "function",
               toolCallId: toolCall.id,
               toolName: toolCall.function.name,
-              argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
+              argsTextDelta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
             });
-            if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
+            if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
              controller.enqueue({
                type: "tool-call",
                toolCallType: "function",
-                toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils3.generateId)(),
+                toolCallId: (_x = toolCall.id) != null ? _x : (0, import_provider_utils3.generateId)(),
                toolName: toolCall.function.name,
                args: toolCall.function.arguments
              });
@@ -808,6 +800,7 @@ var OpenAIChatLanguageModel = class {
 var openaiTokenUsageSchema = import_zod3.z.object({
   prompt_tokens: import_zod3.z.number().nullish(),
   completion_tokens: import_zod3.z.number().nullish(),
+  total_tokens: import_zod3.z.number().nullish(),
   prompt_tokens_details: import_zod3.z.object({
     cached_tokens: import_zod3.z.number().nullish()
   }).nullish(),
@@ -879,6 +872,20 @@ var openaiChatChunkSchema = import_zod3.z.union([
         })
       ).nullish()
     }).nullish(),
+    logprobs: import_zod3.z.object({
+      content: import_zod3.z.array(
+        import_zod3.z.object({
+          token: import_zod3.z.string(),
+          logprob: import_zod3.z.number(),
+          top_logprobs: import_zod3.z.array(
+            import_zod3.z.object({
+              token: import_zod3.z.string(),
+              logprob: import_zod3.z.number()
+            })
+          )
+        })
+      ).nullish()
+    }).nullish(),
     finish_reason: import_zod3.z.string().nullish(),
     index: import_zod3.z.number()
   })
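This new `logprobs` branch in `openaiChatChunkSchema` is the other half of the 928fadf fix: without it, stream chunks carrying logprobs failed schema validation, so `providerMetadata.openai.logprobs` could never be populated (the `transform` above now copies them over). A sketch of observing this with `streamText`; treating `logprobs` as an OpenAI provider option, and `providerMetadata` as a promise on the stream result, are assumptions here:

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = streamText({
  model: openai('gpt-4o'),
  prompt: 'Hello!',
  providerOptions: { openai: { logprobs: 2 } }, // assumed option shape
});

for await (const chunk of result.textStream) {
  // consume the stream
}
console.log((await result.providerMetadata)?.openai?.logprobs);
```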
@@ -1140,6 +1147,7 @@ var OpenAICompletionLanguageModel = class {
     };
   }
   async doGenerate(options) {
+    var _a, _b, _c;
     const { args, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -1167,8 +1175,9 @@ var OpenAICompletionLanguageModel = class {
     return {
       content: [{ type: "text", text: choice.text }],
       usage: {
-        inputTokens: response.usage.prompt_tokens,
-        outputTokens: response.usage.completion_tokens
+        inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
+        outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
+        totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
       },
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       request: { body: args },
@@ -1186,8 +1195,9 @@ var OpenAICompletionLanguageModel = class {
     const body = {
       ...args,
       stream: true,
-      // only include stream_options when in strict compatibility mode:
-      ...this.config.compatibility === "strict" ? { stream_options: { include_usage: true } } : {}
+      stream_options: {
+        include_usage: true
+      }
     };
     const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
       url: this.config.url({
@@ -1207,7 +1217,8 @@ var OpenAICompletionLanguageModel = class {
     const providerMetadata = { openai: {} };
     const usage = {
       inputTokens: void 0,
-      outputTokens: void 0
+      outputTokens: void 0,
+      totalTokens: void 0
     };
     let isFirstChunk = true;
     return {
@@ -1238,6 +1249,7 @@ var OpenAICompletionLanguageModel = class {
           if (value.usage != null) {
             usage.inputTokens = value.usage.prompt_tokens;
             usage.outputTokens = value.usage.completion_tokens;
+            usage.totalTokens = value.usage.total_tokens;
           }
           const choice = value.choices[0];
           if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1268,6 +1280,11 @@ var OpenAICompletionLanguageModel = class {
     };
   }
 };
+var usageSchema = import_zod5.z.object({
+  prompt_tokens: import_zod5.z.number(),
+  completion_tokens: import_zod5.z.number(),
+  total_tokens: import_zod5.z.number()
+});
 var openaiCompletionResponseSchema = import_zod5.z.object({
   id: import_zod5.z.string().nullish(),
   created: import_zod5.z.number().nullish(),
@@ -1283,10 +1300,7 @@ var openaiCompletionResponseSchema = import_zod5.z.object({
       }).nullish()
     })
   ),
-  usage: import_zod5.z.object({
-    prompt_tokens: import_zod5.z.number(),
-    completion_tokens: import_zod5.z.number()
-  })
+  usage: usageSchema.nullish()
 });
 var openaiCompletionChunkSchema = import_zod5.z.union([
   import_zod5.z.object({
@@ -1305,10 +1319,7 @@ var openaiCompletionChunkSchema = import_zod5.z.union([
        }).nullish()
      })
    ),
-    usage: import_zod5.z.object({
-      prompt_tokens: import_zod5.z.number(),
-      completion_tokens: import_zod5.z.number()
-    }).nullish()
+    usage: usageSchema.nullish()
   }),
   openaiErrorDataSchema
 ]);
@@ -1529,25 +1540,25 @@ var openAITranscriptionProviderOptions = import_zod10.z.object({
   /**
    * Additional information to include in the transcription response.
    */
-  include: import_zod10.z.array(import_zod10.z.string()).nullish(),
+  include: import_zod10.z.array(import_zod10.z.string()).optional(),
   /**
    * The language of the input audio in ISO-639-1 format.
    */
-  language: import_zod10.z.string().nullish(),
+  language: import_zod10.z.string().optional(),
   /**
    * An optional text to guide the model's style or continue a previous audio segment.
    */
-  prompt: import_zod10.z.string().nullish(),
+  prompt: import_zod10.z.string().optional(),
   /**
    * The sampling temperature, between 0 and 1.
    * @default 0
    */
-  temperature: import_zod10.z.number().min(0).max(1).default(0).nullish(),
+  temperature: import_zod10.z.number().min(0).max(1).default(0).optional(),
   /**
    * The timestamp granularities to populate for this transcription.
    * @default ['segment']
    */
-  timestampGranularities: import_zod10.z.array(import_zod10.z.enum(["word", "segment"])).default(["segment"]).nullish()
+  timestampGranularities: import_zod10.z.array(import_zod10.z.enum(["word", "segment"])).default(["segment"]).optional()
 });
 
 // src/openai-transcription-model.ts
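This is 6f231db in action: the transcription provider options move from `.nullish()` (absent or `null`) to `.optional()` (absent only), so `null` values that previously validated will now be rejected. A usage sketch with `experimental_transcribe` (audio loading and field values illustrative):

```ts
import { experimental_transcribe as transcribe } from 'ai';
import { openai } from '@ai-sdk/openai';
import { readFile } from 'node:fs/promises';

await transcribe({
  model: openai.transcription('whisper-1'),
  audio: await readFile('speech.mp3'),
  providerOptions: {
    openai: {
      language: 'en', // fine: present
      timestampGranularities: ['word'],
      // prompt: null, // now invalid: omit the key instead
    },
  },
});
```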
@@ -2118,7 +2129,7 @@ var OpenAIResponsesLanguageModel = class {
         ])
       ),
       incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullable(),
-      usage: usageSchema
+      usage: usageSchema2
     })
   ),
   abortSignal: options.abortSignal,
@@ -2172,7 +2183,10 @@ var OpenAIResponsesLanguageModel = class {
       }),
       usage: {
         inputTokens: response.usage.input_tokens,
-        outputTokens: response.usage.output_tokens
+        outputTokens: response.usage.output_tokens,
+        totalTokens: response.usage.input_tokens + response.usage.output_tokens,
+        reasoningTokens: (_f = (_e = response.usage.output_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : void 0,
+        cachedInputTokens: (_h = (_g = response.usage.input_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : void 0
       },
       request: { body },
       response: {
@@ -2184,9 +2198,7 @@ var OpenAIResponsesLanguageModel = class {
       },
       providerMetadata: {
         openai: {
-          responseId: response.id,
-          cachedPromptTokens: (_f = (_e = response.usage.input_tokens_details) == null ? void 0 : _e.cached_tokens) != null ? _f : null,
-          reasoningTokens: (_h = (_g = response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : null
+          responseId: response.id
         }
       },
       warnings
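The same consolidation applies to the Responses API, with one wrinkle visible above: `totalTokens` is derived as `input_tokens + output_tokens`, since the parsed usage schema has no server-side total. Reading what remains in metadata (a sketch, assuming the `openai.responses` factory this package exposes):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { usage, providerMetadata } = await generateText({
  model: openai.responses('gpt-4o'),
  prompt: 'Hello!',
});

console.log(usage.totalTokens); // inputTokens + outputTokens per this diff
console.log(providerMetadata?.openai?.responseId); // still provider-specific
```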
@@ -2215,10 +2227,9 @@ var OpenAIResponsesLanguageModel = class {
     let finishReason = "unknown";
     const usage = {
       inputTokens: void 0,
-      outputTokens: void 0
+      outputTokens: void 0,
+      totalTokens: void 0
     };
-    let cachedPromptTokens = null;
-    let reasoningTokens = null;
     let responseId = null;
     const ongoingToolCalls = {};
     let hasToolCalls = false;
@@ -2296,8 +2307,9 @@ var OpenAIResponsesLanguageModel = class {
             });
             usage.inputTokens = value.response.usage.input_tokens;
             usage.outputTokens = value.response.usage.output_tokens;
-            cachedPromptTokens = (_b = (_a = value.response.usage.input_tokens_details) == null ? void 0 : _a.cached_tokens) != null ? _b : null;
-            reasoningTokens = (_d = (_c = value.response.usage.output_tokens_details) == null ? void 0 : _c.reasoning_tokens) != null ? _d : null;
+            usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
+            usage.reasoningTokens = (_c = (_b = value.response.usage.output_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0;
+            usage.cachedInputTokens = (_e = (_d = value.response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : void 0;
           } else if (isResponseAnnotationAddedChunk(value)) {
             controller.enqueue({
               type: "source",
@@ -2313,13 +2325,9 @@ var OpenAIResponsesLanguageModel = class {
             type: "finish",
             finishReason,
             usage,
-            ...(cachedPromptTokens != null || reasoningTokens != null) && {
-              providerMetadata: {
-                openai: {
-                  responseId,
-                  cachedPromptTokens,
-                  reasoningTokens
-                }
+            providerMetadata: {
+              openai: {
+                responseId
               }
             }
           });
@@ -2331,7 +2339,7 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
 };
-var usageSchema = import_zod12.z.object({
+var usageSchema2 = import_zod12.z.object({
   input_tokens: import_zod12.z.number(),
   input_tokens_details: import_zod12.z.object({ cached_tokens: import_zod12.z.number().nullish() }).nullish(),
   output_tokens: import_zod12.z.number(),
@@ -2345,7 +2353,7 @@ var responseFinishedChunkSchema = import_zod12.z.object({
   type: import_zod12.z.enum(["response.completed", "response.incomplete"]),
   response: import_zod12.z.object({
     incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullish(),
-    usage: usageSchema
+    usage: usageSchema2
   })
 });
 var responseCreatedChunkSchema = import_zod12.z.object({
@@ -2580,10 +2588,9 @@ var OpenAISpeechModel = class {
 
 // src/openai-provider.ts
 function createOpenAI(options = {}) {
-  var _a, _b, _c;
+  var _a, _b;
   const baseURL = (_a = (0, import_provider_utils10.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://api.openai.com/v1";
-  const compatibility = (_b = options.compatibility) != null ? _b : "compatible";
-  const providerName = (_c = options.name) != null ? _c : "openai";
+  const providerName = (_b = options.name) != null ? _b : "openai";
   const getHeaders = () => ({
     Authorization: `Bearer ${(0, import_provider_utils10.loadApiKey)({
       apiKey: options.apiKey,
@@ -2598,14 +2605,12 @@ function createOpenAI(options = {}) {
     provider: `${providerName}.chat`,
     url: ({ path }) => `${baseURL}${path}`,
     headers: getHeaders,
-    compatibility,
     fetch: options.fetch
   });
   const createCompletionModel = (modelId) => new OpenAICompletionLanguageModel(modelId, {
     provider: `${providerName}.completion`,
     url: ({ path }) => `${baseURL}${path}`,
     headers: getHeaders,
-    compatibility,
     fetch: options.fetch
   });
   const createEmbeddingModel = (modelId) => new OpenAIEmbeddingModel(modelId, {
@@ -2670,10 +2675,7 @@ function createOpenAI(options = {}) {
   provider.tools = openaiTools;
   return provider;
 }
-var openai = createOpenAI({
-  compatibility: "strict"
-  // strict for OpenAI API
-});
+var openai = createOpenAI();
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
   createOpenAI,