@ai-sdk/openai 3.0.0-beta.91 → 3.0.0-beta.93
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +2 -2
- package/dist/index.d.ts +2 -2
- package/dist/index.js +8 -6
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +8 -6
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +2 -2
- package/dist/internal/index.d.ts +2 -2
- package/dist/internal/index.js +7 -5
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +7 -5
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/CHANGELOG.md
CHANGED
|
@@ -1,5 +1,17 @@
|
|
|
1
1
|
# @ai-sdk/openai
|
|
2
2
|
|
|
3
|
+
## 3.0.0-beta.93
|
|
4
|
+
|
|
5
|
+
### Patch Changes
|
|
6
|
+
|
|
7
|
+
- d2039d7: feat(provider/openai): add GPT 5.1 Codex Max to OpenAI Responses model IDs list
|
|
8
|
+
|
|
9
|
+
## 3.0.0-beta.92
|
|
10
|
+
|
|
11
|
+
### Patch Changes
|
|
12
|
+
|
|
13
|
+
- 5bf101a: feat(provider/openai): add support for OpenAI xhigh reasoning effort
|
|
14
|
+
|
|
3
15
|
## 3.0.0-beta.91
|
|
4
16
|
|
|
5
17
|
### Patch Changes
|
package/dist/index.d.mts
CHANGED
|
@@ -9,7 +9,7 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<
|
|
|
9
9
|
logprobs?: number | boolean | undefined;
|
|
10
10
|
parallelToolCalls?: boolean | undefined;
|
|
11
11
|
user?: string | undefined;
|
|
12
|
-
reasoningEffort?: "none" | "minimal" | "low" | "medium" | "high" | undefined;
|
|
12
|
+
reasoningEffort?: "none" | "minimal" | "low" | "medium" | "high" | "xhigh" | undefined;
|
|
13
13
|
maxCompletionTokens?: number | undefined;
|
|
14
14
|
store?: boolean | undefined;
|
|
15
15
|
metadata?: Record<string, string> | undefined;
|
|
@@ -443,7 +443,7 @@ declare const openaiTools: {
|
|
|
443
443
|
}>;
|
|
444
444
|
};
|
|
445
445
|
|
|
446
|
-
type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
|
|
446
|
+
type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
|
|
447
447
|
declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazySchema<{
|
|
448
448
|
conversation?: string | null | undefined;
|
|
449
449
|
include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
|
package/dist/index.d.ts
CHANGED
|
@@ -9,7 +9,7 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<
|
|
|
9
9
|
logprobs?: number | boolean | undefined;
|
|
10
10
|
parallelToolCalls?: boolean | undefined;
|
|
11
11
|
user?: string | undefined;
|
|
12
|
-
reasoningEffort?: "none" | "minimal" | "low" | "medium" | "high" | undefined;
|
|
12
|
+
reasoningEffort?: "none" | "minimal" | "low" | "medium" | "high" | "xhigh" | undefined;
|
|
13
13
|
maxCompletionTokens?: number | undefined;
|
|
14
14
|
store?: boolean | undefined;
|
|
15
15
|
metadata?: Record<string, string> | undefined;
|
|
@@ -443,7 +443,7 @@ declare const openaiTools: {
|
|
|
443
443
|
}>;
|
|
444
444
|
};
|
|
445
445
|
|
|
446
|
-
type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
|
|
446
|
+
type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
|
|
447
447
|
declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazySchema<{
|
|
448
448
|
conversation?: string | null | undefined;
|
|
449
449
|
include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
|
package/dist/index.js
CHANGED
|
@@ -492,7 +492,7 @@ var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazySchema)(
|
|
|
492
492
|
/**
|
|
493
493
|
* Reasoning effort for reasoning models. Defaults to `medium`.
|
|
494
494
|
*/
|
|
495
|
-
reasoningEffort: import_v43.z.enum(["none", "minimal", "low", "medium", "high"]).optional(),
|
|
495
|
+
reasoningEffort: import_v43.z.enum(["none", "minimal", "low", "medium", "high", "xhigh"]).optional(),
|
|
496
496
|
/**
|
|
497
497
|
* Maximum number of completion tokens to generate. Useful for reasoning models.
|
|
498
498
|
*/
|
|
@@ -3434,7 +3434,8 @@ var openaiResponsesReasoningModelIds = [
|
|
|
3434
3434
|
"gpt-5.1",
|
|
3435
3435
|
"gpt-5.1-chat-latest",
|
|
3436
3436
|
"gpt-5.1-codex-mini",
|
|
3437
|
-
"gpt-5.1-codex"
|
|
3437
|
+
"gpt-5.1-codex",
|
|
3438
|
+
"gpt-5.1-codex-max"
|
|
3438
3439
|
];
|
|
3439
3440
|
var openaiResponsesModelIds = [
|
|
3440
3441
|
"gpt-4.1",
|
|
@@ -3551,10 +3552,11 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils25.lazySchem
|
|
|
3551
3552
|
/**
|
|
3552
3553
|
* Reasoning effort for reasoning models. Defaults to `medium`. If you use
|
|
3553
3554
|
* `providerOptions` to set the `reasoningEffort` option, this model setting will be ignored.
|
|
3554
|
-
* Valid values: 'none' | 'minimal' | 'low' | 'medium' | 'high'
|
|
3555
|
+
* Valid values: 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh'
|
|
3555
3556
|
*
|
|
3556
|
-
*
|
|
3557
|
-
* models.
|
|
3557
|
+
* The 'none' type for `reasoningEffort` is only available for OpenAI's GPT-5.1
|
|
3558
|
+
* models. Also, the 'xhigh' type for `reasoningEffort` is only available for
|
|
3559
|
+
* OpenAI's GPT-5.1-Codex-Max model. Setting `reasoningEffort` to 'none' or 'xhigh' with unsupported models will result in
|
|
3558
3560
|
* an error.
|
|
3559
3561
|
*/
|
|
3560
3562
|
reasoningEffort: import_v420.z.string().nullish(),
|
|
@@ -5480,7 +5482,7 @@ var OpenAITranscriptionModel = class {
|
|
|
5480
5482
|
};
|
|
5481
5483
|
|
|
5482
5484
|
// src/version.ts
|
|
5483
|
-
var VERSION = true ? "3.0.0-beta.91" : "0.0.0-test";
|
|
5485
|
+
var VERSION = true ? "3.0.0-beta.93" : "0.0.0-test";
|
|
5484
5486
|
|
|
5485
5487
|
// src/openai-provider.ts
|
|
5486
5488
|
function createOpenAI(options = {}) {
|