@ai-sdk/openai 2.0.19 → 2.0.21
- package/CHANGELOG.md +19 -0
- package/dist/index.js +17 -13
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +17 -13
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js +17 -13
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +17 -13
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +2 -2
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,24 @@
 # @ai-sdk/openai
 
+## 2.0.21
+
+### Patch Changes
+
+- d18856a: fix(provider/openai): support websearch tool results without query property
+- 15271d6: fix(provider/openai): do not set `response_format` to `verbose_json` if model is `gpt-4o-transcribe` or `gpt-4o-mini-transcribe`
+
+  These two models do not support it:
+  https://platform.openai.com/docs/api-reference/audio/createTranscription#audio_createtranscription-response_format
+
+- Updated dependencies [1b5a3d3]
+  - @ai-sdk/provider-utils@3.0.6
+
+## 2.0.20
+
+### Patch Changes
+
+- 974de40: fix(provider/ai): do not set `.providerMetadata.openai.logprobs` to an array of empty arrays when using `streamText()`
+
 ## 2.0.19
 
 ### Patch Changes
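The 2.0.20 entry means `streamText()` only populates `providerMetadata.openai.logprobs` when logprobs were actually requested. A minimal consumer-side sketch; whether `logprobs: true` is the exact provider-option shape is an assumption read off the gated check in `dist/index.js` below:

```ts
import { streamText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = streamText({
  model: openai("gpt-4o-mini"),
  prompt: "Say hello.",
  // Assumed option shape; the dist code below checks
  // options.providerOptions?.openai?.logprobs before collecting.
  providerOptions: { openai: { logprobs: true } },
});

for await (const textPart of result.textStream) {
  process.stdout.write(textPart);
}

// Before 974de40 this could be an array of empty arrays even when
// logprobs were never requested; now it is only populated on request.
console.log((await result.providerMetadata)?.openai?.logprobs);
```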
package/dist/index.js
CHANGED
@@ -440,7 +440,7 @@ var webSearchPreview = (0, import_provider_utils4.createProviderDefinedToolFactori
       action: import_v44.z.discriminatedUnion("type", [
         import_v44.z.object({
           type: import_v44.z.literal("search"),
-          query: import_v44.z.string()
+          query: import_v44.z.string().nullish()
         }),
         import_v44.z.object({
           type: import_v44.z.literal("open_page"),
@@ -2151,7 +2151,7 @@ var webSearchCallItem = import_v413.z.object({
   action: import_v413.z.discriminatedUnion("type", [
     import_v413.z.object({
       type: import_v413.z.literal("search"),
-      query: import_v413.z.string()
+      query: import_v413.z.string().nullish()
     }),
     import_v413.z.object({
       type: import_v413.z.literal("open_page"),
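These two hunks are the d18856a fix: the `search` action's `query` becomes optional, so web-search tool results that arrive without a `query` property still validate. A stripped-down zod sketch of the same change (names are illustrative, not the package's exports):

```ts
import { z } from "zod";

// Before the fix: query: z.string() — a search action with no query
// failed validation. .nullish() accepts string | null | undefined.
const searchAction = z.object({
  type: z.literal("search"),
  query: z.string().nullish(),
});

searchAction.parse({ type: "search" });                   // ok after the fix
searchAction.parse({ type: "search", query: null });      // ok after the fix
searchAction.parse({ type: "search", query: "weather" }); // ok before and after
```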
@@ -2679,7 +2679,7 @@ var OpenAIResponsesLanguageModel = class {
         controller.enqueue({ type: "stream-start", warnings });
       },
       transform(chunk, controller) {
-        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p;
+        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r;
         if (options.includeRawChunks) {
           controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
         }
@@ -2890,12 +2890,12 @@ var OpenAIResponsesLanguageModel = class {
             id: value.item_id,
             delta: value.delta
           });
-          if (value.logprobs) {
+          if (((_d = (_c = options.providerOptions) == null ? void 0 : _c.openai) == null ? void 0 : _d.logprobs) && value.logprobs) {
             logprobs.push(value.logprobs);
           }
         } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
           if (value.summary_index > 0) {
-            (
+            (_e = activeReasoning[value.item_id]) == null ? void 0 : _e.summaryParts.push(
               value.summary_index
             );
             controller.enqueue({
@@ -2904,7 +2904,7 @@ var OpenAIResponsesLanguageModel = class {
               providerMetadata: {
                 openai: {
                   itemId: value.item_id,
-                  reasoningEncryptedContent: (
+                  reasoningEncryptedContent: (_g = (_f = activeReasoning[value.item_id]) == null ? void 0 : _f.encryptedContent) != null ? _g : null
                 }
               }
             });
@@ -2922,20 +2922,20 @@ var OpenAIResponsesLanguageModel = class {
           });
         } else if (isResponseFinishedChunk(value)) {
           finishReason = mapOpenAIResponseFinishReason({
-            finishReason: (
+            finishReason: (_h = value.response.incomplete_details) == null ? void 0 : _h.reason,
             hasToolCalls
           });
           usage.inputTokens = value.response.usage.input_tokens;
           usage.outputTokens = value.response.usage.output_tokens;
           usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
-          usage.reasoningTokens = (
-          usage.cachedInputTokens = (
+          usage.reasoningTokens = (_j = (_i = value.response.usage.output_tokens_details) == null ? void 0 : _i.reasoning_tokens) != null ? _j : void 0;
+          usage.cachedInputTokens = (_l = (_k = value.response.usage.input_tokens_details) == null ? void 0 : _k.cached_tokens) != null ? _l : void 0;
         } else if (isResponseAnnotationAddedChunk(value)) {
           if (value.annotation.type === "url_citation") {
             controller.enqueue({
               type: "source",
               sourceType: "url",
-              id: (
+              id: (_o = (_n = (_m = self.config).generateId) == null ? void 0 : _n.call(_m)) != null ? _o : (0, import_provider_utils12.generateId)(),
               url: value.annotation.url,
               title: value.annotation.title
             });
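The usage assignments in this hunk are hard to read minified. A de-minified sketch of the same mapping, with the response-usage shape inferred from the code above (an assumption, not the package's public types):

```ts
// Inferred shape of value.response.usage in the chunk above.
type ResponsesUsage = {
  input_tokens: number;
  output_tokens: number;
  output_tokens_details?: { reasoning_tokens?: number };
  input_tokens_details?: { cached_tokens?: number };
};

// Roughly what the minified assignments compile down from.
function mapUsage(u: ResponsesUsage) {
  return {
    inputTokens: u.input_tokens,
    outputTokens: u.output_tokens,
    totalTokens: u.input_tokens + u.output_tokens,
    reasoningTokens: u.output_tokens_details?.reasoning_tokens ?? undefined,
    cachedInputTokens: u.input_tokens_details?.cached_tokens ?? undefined,
  };
}

mapUsage({ input_tokens: 12, output_tokens: 34 }); // totalTokens: 46, optional fields undefined
```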
@@ -2943,7 +2943,7 @@ var OpenAIResponsesLanguageModel = class {
             controller.enqueue({
               type: "source",
               sourceType: "document",
-              id: (
+              id: (_r = (_q = (_p = self.config).generateId) == null ? void 0 : _q.call(_p)) != null ? _r : (0, import_provider_utils12.generateId)(),
               mediaType: "text/plain",
               title: value.annotation.quote,
               filename: value.annotation.file_id
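The `id:` expressions in the last two hunks (the new `_m`..`_r` temporaries) are the same pattern: prefer a `generateId` supplied via config, fall back to the default from `@ai-sdk/provider-utils`. A de-minified sketch:

```ts
import { generateId } from "@ai-sdk/provider-utils";

// Equivalent un-minified logic for the two `id:` expressions above.
function sourceId(config: { generateId?: () => string }): string {
  return config.generateId?.() ?? generateId();
}
```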
@@ -3486,8 +3486,12 @@ var OpenAITranscriptionModel = class {
       include: openAIOptions.include,
       language: openAIOptions.language,
       prompt: openAIOptions.prompt,
-      response_format
-      //
+      // https://platform.openai.com/docs/api-reference/audio/createTranscription#audio_createtranscription-response_format
+      // prefer verbose_json to get segments for models that support it
+      response_format: [
+        "gpt-4o-transcribe",
+        "gpt-4o-mini-transcribe"
+      ].includes(this.modelId) ? "json" : "verbose_json",
       temperature: openAIOptions.temperature,
       timestamp_granularities: openAIOptions.timestampGranularities
     };
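This transcription hunk implements the 15271d6 changelog entry: `gpt-4o-transcribe` and `gpt-4o-mini-transcribe` reject `verbose_json`, so they now get `json`, while other models keep `verbose_json` and its segment data. A consumer-side sketch (the file path is illustrative):

```ts
import { experimental_transcribe as transcribe } from "ai";
import { openai } from "@ai-sdk/openai";
import { readFile } from "node:fs/promises";

// With 2.0.21 this no longer sends response_format: "verbose_json",
// which the two gpt-4o transcribe models reject.
const result = await transcribe({
  model: openai.transcription("gpt-4o-transcribe"),
  audio: await readFile("audio.mp3"), // illustrative path
});

console.log(result.text);
// Segment timestamps still come through for models that support
// verbose_json, e.g. openai.transcription("whisper-1").
```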