@ai-sdk/openai 3.0.0-beta.56 → 3.0.0-beta.58
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +15 -0
- package/dist/index.d.mts +3 -3
- package/dist/index.d.ts +3 -3
- package/dist/index.js +23 -5
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +23 -5
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +3 -3
- package/dist/internal/index.d.ts +3 -3
- package/dist/internal/index.js +22 -4
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +22 -4
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,20 @@
 # @ai-sdk/openai
 
+## 3.0.0-beta.58
+
+### Patch Changes
+
+- 79b4e46: feat(openai): add 'gpt-5.1' modelID
+
+## 3.0.0-beta.57
+
+### Patch Changes
+
+- b681d7d: feat: expose usage tokens for 'generateImage' function
+- Updated dependencies [b681d7d]
+  - @ai-sdk/provider@3.0.0-beta.16
+  - @ai-sdk/provider-utils@4.0.0-beta.33
+
 ## 3.0.0-beta.56
 
 ### Patch Changes
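The beta.58 entry above adds 'gpt-5.1' to the typed model ID unions. As a minimal usage sketch (assuming the standard `ai` + `@ai-sdk/openai` setup; the prompt is illustrative):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// 'gpt-5.1' now type-checks as an OpenAI model ID in this release.
const { text } = await generateText({
  model: openai('gpt-5.1'),
  prompt: 'Summarize the latest changelog entry in one sentence.',
});

console.log(text);
```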
package/dist/index.d.mts
CHANGED
@@ -3,13 +3,13 @@ import { ProviderV3, LanguageModelV3, EmbeddingModelV3, ImageModelV3, Transcript
 import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
 import { InferSchema, FetchFunction } from '@ai-sdk/provider-utils';
 
-type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
+type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | (string & {});
 declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<{
     logitBias?: Record<number, number> | undefined;
     logprobs?: number | boolean | undefined;
     parallelToolCalls?: boolean | undefined;
     user?: string | undefined;
-    reasoningEffort?: "minimal" | "low" | "medium" | "high" | undefined;
+    reasoningEffort?: "none" | "minimal" | "low" | "medium" | "high" | undefined;
     maxCompletionTokens?: number | undefined;
     store?: boolean | undefined;
     metadata?: Record<string, string> | undefined;
@@ -379,7 +379,7 @@ declare const openaiTools: {
 }>;
 };
 
-type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
+type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
 declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazySchema<{
     include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
     instructions?: string | null | undefined;
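Alongside the new model IDs, the `reasoningEffort` option widens to accept `'none'`. A minimal sketch of passing it through provider options (assuming the usual `providerOptions.openai` pass-through for the Chat Completions options shown above; the model and prompt are illustrative):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// reasoningEffort widened from 'minimal' | 'low' | 'medium' | 'high' to also allow 'none'.
const { text } = await generateText({
  model: openai.chat('gpt-5.1'),
  prompt: 'Reply with exactly one word: ready.',
  providerOptions: {
    openai: { reasoningEffort: 'none' },
  },
});
```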
package/dist/index.d.ts
CHANGED
@@ -3,13 +3,13 @@ import { ProviderV3, LanguageModelV3, EmbeddingModelV3, ImageModelV3, Transcript
 import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
 import { InferSchema, FetchFunction } from '@ai-sdk/provider-utils';
 
-type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
+type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | (string & {});
 declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<{
     logitBias?: Record<number, number> | undefined;
     logprobs?: number | boolean | undefined;
     parallelToolCalls?: boolean | undefined;
     user?: string | undefined;
-    reasoningEffort?: "minimal" | "low" | "medium" | "high" | undefined;
+    reasoningEffort?: "none" | "minimal" | "low" | "medium" | "high" | undefined;
     maxCompletionTokens?: number | undefined;
     store?: boolean | undefined;
     metadata?: Record<string, string> | undefined;
@@ -379,7 +379,7 @@ declare const openaiTools: {
 }>;
 };
 
-type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
+type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
 declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazySchema<{
     include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
     instructions?: string | null | undefined;
package/dist/index.js
CHANGED
@@ -446,7 +446,7 @@ var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazySchema)(
       /**
        * Reasoning effort for reasoning models. Defaults to `medium`.
        */
-      reasoningEffort: import_v43.z.enum(["minimal", "low", "medium", "high"]).optional(),
+      reasoningEffort: import_v43.z.enum(["none", "minimal", "low", "medium", "high"]).optional(),
       /**
        * Maximum number of completion tokens to generate. Useful for reasoning models.
        */
@@ -1633,7 +1633,16 @@ var openaiImageResponseSchema = (0, import_provider_utils12.lazySchema)(
          b64_json: import_v48.z.string(),
          revised_prompt: import_v48.z.string().nullish()
        })
-      )
+      ),
+      usage: import_v48.z.object({
+        input_tokens: import_v48.z.number().nullish(),
+        output_tokens: import_v48.z.number().nullish(),
+        total_tokens: import_v48.z.number().nullish(),
+        input_tokens_details: import_v48.z.object({
+          image_tokens: import_v48.z.number().nullish(),
+          text_tokens: import_v48.z.number().nullish()
+        }).nullish()
+      }).nullish()
     })
   )
 );
@@ -1674,7 +1683,7 @@ var OpenAIImageModel = class {
     headers,
     abortSignal
   }) {
-    var _a, _b, _c, _d;
+    var _a, _b, _c, _d, _e, _f, _g;
     const warnings = [];
     if (aspectRatio != null) {
       warnings.push({
@@ -1711,6 +1720,11 @@ var OpenAIImageModel = class {
     return {
       images: response.data.map((item) => item.b64_json),
       warnings,
+      usage: response.usage != null ? {
+        inputTokens: (_e = response.usage.input_tokens) != null ? _e : void 0,
+        outputTokens: (_f = response.usage.output_tokens) != null ? _f : void 0,
+        totalTokens: (_g = response.usage.total_tokens) != null ? _g : void 0
+      } : void 0,
       response: {
         timestamp: currentDate,
         modelId: this.modelId,
@@ -3088,7 +3102,11 @@ var openaiResponsesReasoningModelIds = [
   "gpt-5-nano",
   "gpt-5-nano-2025-08-07",
   "gpt-5-pro",
-  "gpt-5-pro-2025-10-06"
+  "gpt-5-pro-2025-10-06",
+  "gpt-5.1",
+  "gpt-5.1-chat-latest",
+  "gpt-5.1-codex-mini",
+  "gpt-5.1-codex"
 ];
 var openaiResponsesModelIds = [
   "gpt-4.1",
@@ -4923,7 +4941,7 @@ var OpenAITranscriptionModel = class {
 };
 
 // src/version.ts
-var VERSION = true ? "3.0.0-beta.56" : "0.0.0-test";
+var VERSION = true ? "3.0.0-beta.58" : "0.0.0-test";
 
 // src/openai-provider.ts
 function createOpenAI(options = {}) {
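The image-model changes above map the API's `usage` block (input_tokens, output_tokens, total_tokens) into the provider result. A minimal sketch of reading it, assuming the paired `ai` beta surfaces the mapped usage on the image-generation result (exact surfacing depends on the `ai` version; the model ID and prompt are illustrative):

```ts
import { experimental_generateImage as generateImage } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateImage({
  model: openai.image('gpt-image-1'),
  prompt: 'A watercolor lighthouse at dusk',
});

// Populated from the input_tokens / output_tokens / total_tokens fields mapped
// in the diff above, when the OpenAI Images API reports them.
console.log(result.usage);
```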