@ai-sdk/openai 2.0.0 → 2.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/index.js +82 -9
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +82 -9
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js +82 -9
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +82 -9
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/internal/index.mjs
CHANGED
@@ -692,7 +692,7 @@ var OpenAIChatLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
     const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -726,8 +726,17 @@ var OpenAIChatLanguageModel = class {
         input: toolCall.function.arguments
       });
     }
-    const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
-    const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
+    for (const annotation of (_c = choice.message.annotations) != null ? _c : []) {
+      content.push({
+        type: "source",
+        sourceType: "url",
+        id: generateId(),
+        url: annotation.url,
+        title: annotation.title
+      });
+    }
+    const completionTokenDetails = (_d = response.usage) == null ? void 0 : _d.completion_tokens_details;
+    const promptTokenDetails = (_e = response.usage) == null ? void 0 : _e.prompt_tokens_details;
     const providerMetadata = { openai: {} };
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
       providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
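Note: the loop added above turns `url_citation` annotations on the assistant message into `source` content parts. A minimal consumer-side sketch of what that enables, assuming the AI SDK v5 `generateText` API and a search-capable model id (`gpt-4o-search-preview` is an assumption here, not part of this diff):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Hypothetical usage sketch: any chat response carrying
// `message.annotations` (url_citation) now surfaces them as sources.
const result = await generateText({
  model: openai('gpt-4o-search-preview'), // assumed search-capable model id
  prompt: 'What changed in the latest AI SDK release?',
});

// Each annotation becomes { type: 'source', sourceType: 'url', id, url, title }.
for (const source of result.sources) {
  if (source.sourceType === 'url') {
    console.log(source.url, source.title);
  }
}
```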
@@ -735,18 +744,18 @@ var OpenAIChatLanguageModel = class {
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
       providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
     }
-    if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+    if (((_f = choice.logprobs) == null ? void 0 : _f.content) != null) {
       providerMetadata.openai.logprobs = choice.logprobs.content;
     }
     return {
       content,
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       usage: {
-        inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
-        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
-        totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
-        reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
-        cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
+        inputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.prompt_tokens) != null ? _h : void 0,
+        outputTokens: (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens) != null ? _j : void 0,
+        totalTokens: (_l = (_k = response.usage) == null ? void 0 : _k.total_tokens) != null ? _l : void 0,
+        reasoningTokens: (_m = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _m : void 0,
+        cachedInputTokens: (_n = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _n : void 0
       },
       request: { body },
       response: {
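The usage block above maps the raw OpenAI usage payload onto the AI SDK usage fields. An illustrative translation with invented token counts:

```ts
// Raw OpenAI chat completion usage (example values):
const openaiUsage = {
  prompt_tokens: 120,
  completion_tokens: 48,
  total_tokens: 168,
  completion_tokens_details: { reasoning_tokens: 16 },
  prompt_tokens_details: { cached_tokens: 64 },
};

// What doGenerate reports after this change (per the hunk above):
const usage = {
  inputTokens: openaiUsage.prompt_tokens,                                   // 120
  outputTokens: openaiUsage.completion_tokens,                              // 48
  totalTokens: openaiUsage.total_tokens,                                    // 168
  reasoningTokens: openaiUsage.completion_tokens_details?.reasoning_tokens, // 16
  cachedInputTokens: openaiUsage.prompt_tokens_details?.cached_tokens,      // 64
};
```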
@@ -943,6 +952,17 @@ var OpenAIChatLanguageModel = class {
             }
           }
         }
+        if (delta.annotations != null) {
+          for (const annotation of delta.annotations) {
+            controller.enqueue({
+              type: "source",
+              sourceType: "url",
+              id: generateId(),
+              url: annotation.url,
+              title: annotation.title
+            });
+          }
+        }
       },
       flush(controller) {
         if (isActiveText) {
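The same annotation handling is added to the streaming transform: each `delta.annotations` entry is enqueued as a `source` stream part. A hedged sketch of observing those parts with `streamText` (the `fullStream` iteration and the model id are assumptions about the surrounding AI SDK v5 API, not shown in this diff):

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = streamText({
  model: openai('gpt-4o-search-preview'), // assumed search-capable model id
  prompt: 'Summarize recent AI SDK releases with citations.',
});

for await (const part of result.fullStream) {
  // Citations now arrive incrementally as source parts:
  // { type: 'source', sourceType: 'url', id, url, title } per the diff above.
  if (part.type === 'source') {
    console.log('cited:', part);
  }
}
```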
@@ -993,6 +1013,15 @@ var openaiChatResponseSchema = z5.object({
             arguments: z5.string()
           })
         })
+      ).nullish(),
+      annotations: z5.array(
+        z5.object({
+          type: z5.literal("url_citation"),
+          start_index: z5.number(),
+          end_index: z5.number(),
+          url: z5.string(),
+          title: z5.string()
+        })
       ).nullish()
     }),
     index: z5.number(),
@@ -1035,6 +1064,15 @@ var openaiChatChunkSchema = z5.union([
             arguments: z5.string().nullish()
           })
         })
+      ).nullish(),
+      annotations: z5.array(
+        z5.object({
+          type: z5.literal("url_citation"),
+          start_index: z5.number(),
+          end_index: z5.number(),
+          url: z5.string(),
+          title: z5.string()
+        })
       ).nullish()
     }).nullish(),
     logprobs: z5.object({
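Both the response schema and the chunk schema (this hunk and the previous one) now accept an `annotations` array on the assistant message or delta. A fragment that parses against the new field; all values are invented for illustration:

```ts
// Assistant message fragment now accepted by openaiChatResponseSchema
// (openaiChatChunkSchema accepts the same shape on `delta`):
const message = {
  role: 'assistant',
  content: 'Version 2.0.2 adds url_citation support.',
  annotations: [
    {
      type: 'url_citation',
      start_index: 8,
      end_index: 13,
      url: 'https://example.com/changelog', // invented URL
      title: 'Example changelog',           // invented title
    },
  ],
};
```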
@@ -2551,6 +2589,11 @@ var OpenAIResponsesLanguageModel = class {
       id: z15.string(),
       status: z15.string().optional()
     }),
+    z15.object({
+      type: z15.literal("file_search_call"),
+      id: z15.string(),
+      status: z15.string().optional()
+    }),
     z15.object({
       type: z15.literal("reasoning"),
       id: z15.string(),
@@ -2677,6 +2720,26 @@ var OpenAIResponsesLanguageModel = class {
           });
           break;
         }
+        case "file_search_call": {
+          content.push({
+            type: "tool-call",
+            toolCallId: part.id,
+            toolName: "file_search",
+            input: "",
+            providerExecuted: true
+          });
+          content.push({
+            type: "tool-result",
+            toolCallId: part.id,
+            toolName: "file_search",
+            result: {
+              type: "file_search_tool_result",
+              status: part.status || "completed"
+            },
+            providerExecuted: true
+          });
+          break;
+        }
       }
     }
     return {
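Together with the new `file_search_call` schema variant two hunks up, this case maps a provider-executed file search into a tool-call / tool-result pair. Illustrative output for a single Responses API output item (the id is invented):

```ts
// Given an output item { type: 'file_search_call', id: 'fs_123', status: 'completed' },
// doGenerate now appends these two content parts:
const parts = [
  {
    type: 'tool-call',
    toolCallId: 'fs_123',
    toolName: 'file_search',
    input: '',
    providerExecuted: true,
  },
  {
    type: 'tool-result',
    toolCallId: 'fs_123',
    toolName: 'file_search',
    result: { type: 'file_search_tool_result', status: 'completed' },
    providerExecuted: true,
  },
];
```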
@@ -3053,6 +3116,11 @@ var responseOutputItemAddedSchema = z15.object({
       type: z15.literal("computer_call"),
       id: z15.string(),
       status: z15.string()
+    }),
+    z15.object({
+      type: z15.literal("file_search_call"),
+      id: z15.string(),
+      status: z15.string()
     })
   ])
 });
@@ -3086,6 +3154,11 @@ var responseOutputItemDoneSchema = z15.object({
       type: z15.literal("computer_call"),
       id: z15.string(),
       status: z15.literal("completed")
+    }),
+    z15.object({
+      type: z15.literal("file_search_call"),
+      id: z15.string(),
+      status: z15.literal("completed")
     })
   ])
 });
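The matching streaming schemas now accept `file_search_call` items in `response.output_item.added` / `response.output_item.done` events, so those events no longer fail validation mid-stream. A sketch of such events (the outer `output_index`/`item` envelope is assumed from the Responses API streaming format, and the ids are invented):

```ts
// Of the kind accepted by responseOutputItemAddedSchema:
const added = {
  type: 'response.output_item.added',
  output_index: 0,
  item: { type: 'file_search_call', id: 'fs_123', status: 'in_progress' },
};

// Of the kind accepted by responseOutputItemDoneSchema
// (status must be "completed" per the schema above):
const done = {
  type: 'response.output_item.done',
  output_index: 0,
  item: { type: 'file_search_call', id: 'fs_123', status: 'completed' },
};
```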