@ai-sdk/openai 2.0.0-canary.14 → 2.0.0-canary.16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +44 -0
- package/README.md +2 -2
- package/dist/index.js +170 -79
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +170 -79
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +39 -15
- package/dist/internal/index.d.ts +39 -15
- package/dist/internal/index.js +170 -79
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +170 -79
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +4 -4
package/CHANGELOG.md
CHANGED

@@ -1,5 +1,49 @@
 # @ai-sdk/openai

+## 2.0.0-canary.16
+
+### Patch Changes
+
+- 928fadf: fix(providers/openai): logprobs for stream alongside completion model
+- 6f231db: fix(providers): always use optional instead of mix of nullish for providerOptions
+- Updated dependencies [a571d6e]
+- Updated dependencies [a8c8bd5]
+- Updated dependencies [7979f7f]
+- Updated dependencies [41fa418]
+  - @ai-sdk/provider-utils@3.0.0-canary.15
+  - @ai-sdk/provider@2.0.0-canary.14
+
+## 2.0.0-canary.15
+
+### Patch Changes
+
+- 136819b: chore(providers/openai): re-introduce logprobs as providerMetadata
+- 9bd5ab5: feat (provider): add providerMetadata to ImageModelV2 interface (#5977)
+
+  The `experimental_generateImage` method from the `ai` package now returns revised prompts for OpenAI's image models.
+
+  ```js
+  const prompt = 'Santa Claus driving a Cadillac';
+
+  const { providerMetadata } = await experimental_generateImage({
+    model: openai.image('dall-e-3'),
+    prompt,
+  });
+
+  const revisedPrompt = providerMetadata.openai.images[0]?.revisedPrompt;
+
+  console.log({
+    prompt,
+    revisedPrompt,
+  });
+  ```
+
+- 284353f: fix(providers/openai): zod parse error with function
+- Updated dependencies [957b739]
+- Updated dependencies [9bd5ab5]
+  - @ai-sdk/provider-utils@3.0.0-canary.14
+  - @ai-sdk/provider@2.0.0-canary.13
+
 ## 2.0.0-canary.14

 ### Patch Changes
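To see what the logprobs changes in these two releases mean for callers: a minimal sketch, assuming the AI SDK canary `generateText` entry point and the `openai` provider export (the option and metadata names come from the diff itself):

```js
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text, providerMetadata } = await generateText({
  model: openai('gpt-4o'),
  prompt: 'Hello!',
  providerOptions: {
    // true returns logprobs for the generated tokens;
    // a number n additionally returns the top-n alternatives per token.
    openai: { logprobs: 2 },
  },
});

// logprobs are re-introduced as provider metadata, not a top-level result field:
console.log(providerMetadata?.openai?.logprobs);
```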
package/README.md
CHANGED

@@ -1,6 +1,6 @@
 # AI SDK - OpenAI Provider

-The **[OpenAI provider](https://sdk.
+The **[OpenAI provider](https://ai-sdk.dev/providers/ai-sdk-providers/openai)** for the [AI SDK](https://ai-sdk.dev/docs)
 contains language model support for the OpenAI chat and completion APIs and embedding model support for the OpenAI embeddings API.

 ## Setup
@@ -33,4 +33,4 @@ const { text } = await generateText({

 ## Documentation

-Please check out the **[OpenAI provider documentation](https://sdk.
+Please check out the **[OpenAI provider documentation](https://ai-sdk.dev/providers/ai-sdk-providers/openai)** for more information.
package/dist/index.js
CHANGED

@@ -238,6 +238,16 @@ var openaiProviderOptions = import_zod.z.object({
    * the GPT tokenizer) to an associated bias value from -100 to 100.
    */
   logitBias: import_zod.z.record(import_zod.z.coerce.number(), import_zod.z.number()).optional(),
+  /**
+   * Return the log probabilities of the tokens.
+   *
+   * Setting to true will return the log probabilities of the tokens that
+   * were generated.
+   *
+   * Setting to a number will return the log probabilities of the top n
+   * tokens that were generated.
+   */
+  logprobs: import_zod.z.union([import_zod.z.boolean(), import_zod.z.number()]).optional(),
   /**
    * Whether to enable parallel function calling during tool use. Default to true.
    */
@@ -412,6 +422,8 @@ var OpenAIChatLanguageModel = class {
       model: this.modelId,
       // model specific settings:
       logit_bias: openaiOptions.logitBias,
+      logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+      top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
       user: openaiOptions.user,
       parallel_tool_calls: openaiOptions.parallelToolCalls,
       // standardized settings:
@@ -484,6 +496,20 @@ var OpenAIChatLanguageModel = class {
         message: "logitBias is not supported for reasoning models"
       });
     }
+    if (baseArgs.logprobs != null) {
+      baseArgs.logprobs = void 0;
+      warnings.push({
+        type: "other",
+        message: "logprobs is not supported for reasoning models"
+      });
+    }
+    if (baseArgs.top_logprobs != null) {
+      baseArgs.top_logprobs = void 0;
+      warnings.push({
+        type: "other",
+        message: "topLogprobs is not supported for reasoning models"
+      });
+    }
     if (baseArgs.max_tokens != null) {
       if (baseArgs.max_completion_tokens == null) {
         baseArgs.max_completion_tokens = baseArgs.max_tokens;
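The ternary chain in the request mapping above is easier to read spelled out. A hypothetical restatement (not code from the package) of how the single `logprobs` provider option fans out into the API's `logprobs` / `top_logprobs` request parameters:

```js
// Restates the mapping logic from the diff above for readability.
function mapLogprobs(logprobs) {
  return {
    // the boolean request flag is set whenever logprobs are wanted at all
    logprobs:
      logprobs === true || typeof logprobs === 'number' ? true : undefined,
    // a number selects the top-n alternatives; `true` sends top_logprobs: 0
    top_logprobs:
      typeof logprobs === 'number' ? logprobs : logprobs === true ? 0 : undefined,
  };
}

console.log(mapLogprobs(true)); // { logprobs: true, top_logprobs: 0 }
console.log(mapLogprobs(3)); // { logprobs: true, top_logprobs: 3 }
console.log(mapLogprobs(undefined)); // { logprobs: undefined, top_logprobs: undefined }
```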
@@ -519,7 +545,7 @@
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
     const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -557,24 +583,24 @@
     const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
     const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
     const providerMetadata = { openai: {} };
-    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
-      providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
-    }
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
       providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
     }
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
       providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
     }
-    if ((
-      providerMetadata.openai.
+    if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+      providerMetadata.openai.logprobs = choice.logprobs.content;
     }
     return {
       content,
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       usage: {
-        inputTokens: (
-        outputTokens: (
+        inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
+        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
+        totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
+        reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
+        cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
       },
       request: { body },
       response: {
@@ -608,12 +634,12 @@
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { messages: rawPrompt, ...rawSettings } = args;
     const toolCalls = [];
     let finishReason = "unknown";
     const usage = {
       inputTokens: void 0,
-      outputTokens: void 0
+      outputTokens: void 0,
+      totalTokens: void 0
     };
     let isFirstChunk = true;
     const providerMetadata = { openai: {} };
@@ -624,7 +650,7 @@
         controller.enqueue({ type: "stream-start", warnings });
       },
       transform(chunk, controller) {
-        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
         if (!chunk.success) {
           finishReason = "error";
           controller.enqueue({ type: "error", error: chunk.error });
@@ -644,31 +670,25 @@
           });
         }
         if (value.usage != null) {
-
-
-
-
-
-
-
-          usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
-          if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
-            providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
-          }
-          if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
-            providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
+          usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+          usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
+          usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
+          usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
+          usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
+          if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
+            providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
           }
-          if ((completion_tokens_details == null ? void 0 :
-            providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 :
-          }
-          if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
-            providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
+          if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
+            providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
           }
         }
         const choice = value.choices[0];
         if ((choice == null ? void 0 : choice.finish_reason) != null) {
           finishReason = mapOpenAIFinishReason(choice.finish_reason);
         }
+        if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
+          providerMetadata.openai.logprobs = choice.logprobs.content;
+        }
         if ((choice == null ? void 0 : choice.delta) == null) {
           return;
         }
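The usage handling above moves `reasoningTokens` and cached-token counts out of `providerMetadata.openai` and into the standard usage object, and adds `totalTokens`. A minimal sketch of what a caller sees, assuming the canary `generateText` API (the logged values are illustrative, not real output):

```js
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { usage } = await generateText({
  model: openai('gpt-4o'),
  prompt: 'Summarize the AI SDK in one sentence.',
});

// totalTokens is now populated from the API's total_tokens field;
// reasoningTokens and cachedInputTokens live on usage, not providerMetadata.
console.log(usage);
// e.g. { inputTokens: 12, outputTokens: 25, totalTokens: 37,
//        reasoningTokens: undefined, cachedInputTokens: undefined }
```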
@@ -695,7 +715,7 @@
                 message: `Expected 'id' to be a string.`
               });
             }
-            if (((
+            if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
               throw new import_provider3.InvalidResponseDataError({
                 data: toolCallDelta,
                 message: `Expected 'function.name' to be a string.`
@@ -706,12 +726,12 @@
               type: "function",
               function: {
                 name: toolCallDelta.function.name,
-                arguments: (
+                arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
               },
               hasFinished: false
             };
             const toolCall2 = toolCalls[index];
-            if (((
+            if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
               if (toolCall2.function.arguments.length > 0) {
                 controller.enqueue({
                   type: "tool-call-delta",
@@ -725,7 +745,7 @@
                 controller.enqueue({
                   type: "tool-call",
                   toolCallType: "function",
-                  toolCallId: (
+                  toolCallId: (_q = toolCall2.id) != null ? _q : (0, import_provider_utils3.generateId)(),
                   toolName: toolCall2.function.name,
                   args: toolCall2.function.arguments
                 });
@@ -738,21 +758,21 @@
           if (toolCall.hasFinished) {
             continue;
           }
-          if (((
-            toolCall.function.arguments += (
+          if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
+            toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
           }
           controller.enqueue({
             type: "tool-call-delta",
             toolCallType: "function",
             toolCallId: toolCall.id,
             toolName: toolCall.function.name,
-            argsTextDelta: (
+            argsTextDelta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
           });
-          if (((
+          if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
             controller.enqueue({
               type: "tool-call",
               toolCallType: "function",
-              toolCallId: (
+              toolCallId: (_x = toolCall.id) != null ? _x : (0, import_provider_utils3.generateId)(),
               toolName: toolCall.function.name,
               args: toolCall.function.arguments
             });
@@ -779,6 +799,7 @@
 var openaiTokenUsageSchema = import_zod3.z.object({
   prompt_tokens: import_zod3.z.number().nullish(),
   completion_tokens: import_zod3.z.number().nullish(),
+  total_tokens: import_zod3.z.number().nullish(),
   prompt_tokens_details: import_zod3.z.object({
     cached_tokens: import_zod3.z.number().nullish()
   }).nullish(),
@@ -809,6 +830,20 @@ var openaiChatResponseSchema = import_zod3.z.object({
       ).nullish()
     }),
     index: import_zod3.z.number(),
+    logprobs: import_zod3.z.object({
+      content: import_zod3.z.array(
+        import_zod3.z.object({
+          token: import_zod3.z.string(),
+          logprob: import_zod3.z.number(),
+          top_logprobs: import_zod3.z.array(
+            import_zod3.z.object({
+              token: import_zod3.z.string(),
+              logprob: import_zod3.z.number()
+            })
+          )
+        })
+      ).nullish()
+    }).nullish(),
     finish_reason: import_zod3.z.string().nullish()
   })
 ),
@@ -828,7 +863,7 @@ var openaiChatChunkSchema = import_zod3.z.union([
         import_zod3.z.object({
           index: import_zod3.z.number(),
           id: import_zod3.z.string().nullish(),
-          type: import_zod3.z.literal("function").
+          type: import_zod3.z.literal("function").nullish(),
           function: import_zod3.z.object({
             name: import_zod3.z.string().nullish(),
             arguments: import_zod3.z.string().nullish()
@@ -836,7 +871,21 @@ var openaiChatChunkSchema = import_zod3.z.union([
         })
       ).nullish()
     }).nullish(),
-
+    logprobs: import_zod3.z.object({
+      content: import_zod3.z.array(
+        import_zod3.z.object({
+          token: import_zod3.z.string(),
+          logprob: import_zod3.z.number(),
+          top_logprobs: import_zod3.z.array(
+            import_zod3.z.object({
+              token: import_zod3.z.string(),
+              logprob: import_zod3.z.number()
+            })
+          )
+        })
+      ).nullish()
+    }).nullish(),
+    finish_reason: import_zod3.z.string().nullish(),
     index: import_zod3.z.number()
   })
 ),
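Per the schemas just added, each element of `choice.logprobs.content` (what ends up in `providerMetadata.openai.logprobs` for chat models) has this shape. An illustrative value, not actual API output:

```js
// one entry of choice.logprobs.content, as validated by the zod schema above
const entry = {
  token: ' world',
  logprob: -0.12,
  top_logprobs: [
    { token: ' world', logprob: -0.12 },
    { token: ' there', logprob: -2.5 },
  ],
};
```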
@@ -996,7 +1045,17 @@ var openaiCompletionProviderOptions = import_zod4.z.object({
   A unique identifier representing your end-user, which can help OpenAI to
   monitor and detect abuse. Learn more.
   */
-  user: import_zod4.z.string().optional()
+  user: import_zod4.z.string().optional(),
+  /**
+  Return the log probabilities of the tokens. Including logprobs will increase
+  the response size and can slow down response times. However, it can
+  be useful to better understand how the model is behaving.
+  Setting to true will return the log probabilities of the tokens that
+  were generated.
+  Setting to a number will return the log probabilities of the top n
+  tokens that were generated.
+  */
+  logprobs: import_zod4.z.union([import_zod4.z.boolean(), import_zod4.z.number()]).optional()
 });

 // src/openai-completion-language-model.ts
@@ -1068,6 +1127,7 @@ var OpenAICompletionLanguageModel = class {
     // model specific settings:
     echo: openaiOptions.echo,
     logit_bias: openaiOptions.logitBias,
+    logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
     suffix: openaiOptions.suffix,
     user: openaiOptions.user,
     // standardized settings:
@@ -1086,6 +1146,7 @@
     };
   }
   async doGenerate(options) {
+    var _a, _b, _c;
     const { args, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -1106,11 +1167,16 @@
       fetch: this.config.fetch
     });
     const choice = response.choices[0];
+    const providerMetadata = { openai: {} };
+    if (choice.logprobs != null) {
+      providerMetadata.openai.logprobs = choice.logprobs;
+    }
     return {
       content: [{ type: "text", text: choice.text }],
       usage: {
-        inputTokens: response.usage.prompt_tokens,
-        outputTokens: response.usage.completion_tokens
+        inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
+        outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
+        totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
       },
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       request: { body: args },
@@ -1119,6 +1185,7 @@
         headers: responseHeaders,
         body: rawResponse
       },
+      providerMetadata,
       warnings
     };
   }
@@ -1145,9 +1212,11 @@
       fetch: this.config.fetch
     });
     let finishReason = "unknown";
+    const providerMetadata = { openai: {} };
     const usage = {
       inputTokens: void 0,
-      outputTokens: void 0
+      outputTokens: void 0,
+      totalTokens: void 0
     };
     let isFirstChunk = true;
     return {
@@ -1178,11 +1247,15 @@
         if (value.usage != null) {
           usage.inputTokens = value.usage.prompt_tokens;
           usage.outputTokens = value.usage.completion_tokens;
+          usage.totalTokens = value.usage.total_tokens;
         }
         const choice = value.choices[0];
         if ((choice == null ? void 0 : choice.finish_reason) != null) {
           finishReason = mapOpenAIFinishReason(choice.finish_reason);
         }
+        if ((choice == null ? void 0 : choice.logprobs) != null) {
+          providerMetadata.openai.logprobs = choice.logprobs;
+        }
         if ((choice == null ? void 0 : choice.text) != null) {
           controller.enqueue({
             type: "text",
@@ -1194,6 +1267,7 @@
           controller.enqueue({
             type: "finish",
             finishReason,
+            providerMetadata,
             usage
           });
         }
@@ -1204,6 +1278,11 @@
     };
   }
 };
+var usageSchema = import_zod5.z.object({
+  prompt_tokens: import_zod5.z.number(),
+  completion_tokens: import_zod5.z.number(),
+  total_tokens: import_zod5.z.number()
+});
 var openaiCompletionResponseSchema = import_zod5.z.object({
   id: import_zod5.z.string().nullish(),
   created: import_zod5.z.number().nullish(),
@@ -1211,13 +1290,15 @@
   choices: import_zod5.z.array(
     import_zod5.z.object({
       text: import_zod5.z.string(),
-      finish_reason: import_zod5.z.string()
+      finish_reason: import_zod5.z.string(),
+      logprobs: import_zod5.z.object({
+        tokens: import_zod5.z.array(import_zod5.z.string()),
+        token_logprobs: import_zod5.z.array(import_zod5.z.number()),
+        top_logprobs: import_zod5.z.array(import_zod5.z.record(import_zod5.z.string(), import_zod5.z.number())).nullish()
+      }).nullish()
     })
   ),
-  usage:
-  prompt_tokens: import_zod5.z.number(),
-  completion_tokens: import_zod5.z.number()
-  })
+  usage: usageSchema.nullish()
 });
 var openaiCompletionChunkSchema = import_zod5.z.union([
   import_zod5.z.object({
@@ -1228,13 +1309,15 @@
     import_zod5.z.object({
       text: import_zod5.z.string(),
       finish_reason: import_zod5.z.string().nullish(),
-      index: import_zod5.z.number()
+      index: import_zod5.z.number(),
+      logprobs: import_zod5.z.object({
+        tokens: import_zod5.z.array(import_zod5.z.string()),
+        token_logprobs: import_zod5.z.array(import_zod5.z.number()),
+        top_logprobs: import_zod5.z.array(import_zod5.z.record(import_zod5.z.string(), import_zod5.z.number())).nullish()
+      }).nullish()
     })
   ),
-  usage:
-  prompt_tokens: import_zod5.z.number(),
-  completion_tokens: import_zod5.z.number()
-  }).nullish()
+  usage: usageSchema.nullish()
 }),
 openaiErrorDataSchema
 ]);
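The completion model keeps the legacy Completions API logprobs shape (parallel `tokens`, `token_logprobs`, and `top_logprobs` arrays), as the schemas above show. A minimal sketch of requesting and reading them, assuming the canary `generateText` entry point and the `openai.completion` factory:

```js
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { providerMetadata } = await generateText({
  model: openai.completion('gpt-3.5-turbo-instruct'),
  prompt: 'Hello, world!',
  providerOptions: {
    // true maps to the API's `logprobs: 0`; a number requests top-n logprobs
    openai: { logprobs: true },
  },
});

// legacy completions shape: parallel arrays rather than per-token objects
const { tokens, token_logprobs } = providerMetadata?.openai?.logprobs ?? {};
console.log(tokens, token_logprobs);
```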
@@ -1405,12 +1488,23 @@
         timestamp: currentDate,
         modelId: this.modelId,
         headers: responseHeaders
+      },
+      providerMetadata: {
+        openai: {
+          images: response.data.map(
+            (item) => item.revised_prompt ? {
+              revisedPrompt: item.revised_prompt
+            } : null
+          )
+        }
       }
     };
   }
 };
 var openaiImageResponseSchema = import_zod8.z.object({
-  data: import_zod8.z.array(
+  data: import_zod8.z.array(
+    import_zod8.z.object({ b64_json: import_zod8.z.string(), revised_prompt: import_zod8.z.string().optional() })
+  )
 });

 // src/openai-tools.ts
@@ -1444,25 +1538,25 @@ var openAITranscriptionProviderOptions = import_zod10.z.object({
   /**
    * Additional information to include in the transcription response.
    */
-  include: import_zod10.z.array(import_zod10.z.string()).
+  include: import_zod10.z.array(import_zod10.z.string()).optional(),
   /**
    * The language of the input audio in ISO-639-1 format.
    */
-  language: import_zod10.z.string().
+  language: import_zod10.z.string().optional(),
   /**
    * An optional text to guide the model's style or continue a previous audio segment.
    */
-  prompt: import_zod10.z.string().
+  prompt: import_zod10.z.string().optional(),
   /**
    * The sampling temperature, between 0 and 1.
    * @default 0
    */
-  temperature: import_zod10.z.number().min(0).max(1).default(0).
+  temperature: import_zod10.z.number().min(0).max(1).default(0).optional(),
   /**
    * The timestamp granularities to populate for this transcription.
    * @default ['segment']
    */
-  timestampGranularities: import_zod10.z.array(import_zod10.z.enum(["word", "segment"])).default(["segment"]).
+  timestampGranularities: import_zod10.z.array(import_zod10.z.enum(["word", "segment"])).default(["segment"]).optional()
 });

 // src/openai-transcription-model.ts
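With the transcription provider options now uniformly `.optional()`, a usage sketch (assuming the canary `experimental_transcribe` entry point; the audio file path is a placeholder):

```js
import { experimental_transcribe as transcribe } from 'ai';
import { openai } from '@ai-sdk/openai';
import { readFile } from 'node:fs/promises';

const result = await transcribe({
  model: openai.transcription('whisper-1'),
  audio: await readFile('./audio.mp3'), // placeholder path
  providerOptions: {
    openai: {
      language: 'en', // ISO-639-1, per the option docs above
      temperature: 0,
      timestampGranularities: ['word'],
    },
  },
});

console.log(result.text);
```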
@@ -2033,7 +2127,7 @@
         ])
       ),
       incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullable(),
-      usage:
+      usage: usageSchema2
     })
   ),
   abortSignal: options.abortSignal,
@@ -2087,7 +2181,10 @@
       }),
       usage: {
         inputTokens: response.usage.input_tokens,
-        outputTokens: response.usage.output_tokens
+        outputTokens: response.usage.output_tokens,
+        totalTokens: response.usage.input_tokens + response.usage.output_tokens,
+        reasoningTokens: (_f = (_e = response.usage.output_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : void 0,
+        cachedInputTokens: (_h = (_g = response.usage.input_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : void 0
       },
       request: { body },
       response: {
@@ -2099,9 +2196,7 @@
       },
       providerMetadata: {
         openai: {
-          responseId: response.id
-          cachedPromptTokens: (_f = (_e = response.usage.input_tokens_details) == null ? void 0 : _e.cached_tokens) != null ? _f : null,
-          reasoningTokens: (_h = (_g = response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : null
+          responseId: response.id
         }
       },
       warnings
@@ -2130,10 +2225,9 @@
     let finishReason = "unknown";
     const usage = {
       inputTokens: void 0,
-      outputTokens: void 0
+      outputTokens: void 0,
+      totalTokens: void 0
     };
-    let cachedPromptTokens = null;
-    let reasoningTokens = null;
     let responseId = null;
     const ongoingToolCalls = {};
     let hasToolCalls = false;
@@ -2211,8 +2305,9 @@
           });
           usage.inputTokens = value.response.usage.input_tokens;
           usage.outputTokens = value.response.usage.output_tokens;
-
-          reasoningTokens = (
+          usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
+          usage.reasoningTokens = (_c = (_b = value.response.usage.output_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0;
+          usage.cachedInputTokens = (_e = (_d = value.response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : void 0;
         } else if (isResponseAnnotationAddedChunk(value)) {
           controller.enqueue({
             type: "source",
@@ -2228,13 +2323,9 @@
             type: "finish",
             finishReason,
             usage,
-
-
-
-            responseId,
-            cachedPromptTokens,
-            reasoningTokens
-          }
+            providerMetadata: {
+              openai: {
+                responseId
             }
           }
         });
@@ -2246,7 +2337,7 @@
     };
   }
 };
-var
+var usageSchema2 = import_zod12.z.object({
   input_tokens: import_zod12.z.number(),
   input_tokens_details: import_zod12.z.object({ cached_tokens: import_zod12.z.number().nullish() }).nullish(),
   output_tokens: import_zod12.z.number(),
@@ -2260,7 +2351,7 @@
   type: import_zod12.z.enum(["response.completed", "response.incomplete"]),
   response: import_zod12.z.object({
     incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullish(),
-    usage:
+    usage: usageSchema2
   })
 });
 var responseCreatedChunkSchema = import_zod12.z.object({